context: move logic from changectx.__init__ to localrepo.__getitem__ (API)

Martin von Zweigbergk
r39994:3d35304b default
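This changeset makes changectx.__init__ a trivial constructor that takes an
already-resolved (rev, node) pair; resolving a user-supplied changeid (an
integer rev, 'null', 'tip', '.', a 20-byte binary node, or a 40-character hex
node) now happens in localrepo.__getitem__, whose new body is outside this
hunk. A minimal before/after sketch of the API change; the localrepo internals
shown are an assumption based on the commit message, not code from this diff:

    # before: any changeid could be handed straight to the class
    ctx = context.changectx(repo, '.')

    # after: the repository resolves the changeid first
    ctx = repo['.']

    # localrepo.__getitem__ (hypothetical sketch, not shown in this hunk)
    # ends up constructing the context from the resolved pair, roughly:
    node = repo.dirstate.p1()
    rev = repo.changelog.rev(node)
    ctx = context.changectx(repo, rev, node)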
@@ -1,2497 +1,2437 @@
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    addednodeid,
-    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirid,
)
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

propertycache = util.propertycache

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. Possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, prefix='', relroot='', copy=None,
             hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        narrowmatch = self._repo.narrowmatch()
        if not narrowmatch.always():
            for l in r:
                l[:] = list(filter(narrowmatch, l))
        for l in r:
            l.sort()

        return r

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
-    def __init__(self, repo, changeid='.'):
+    def __init__(self, repo, rev, node):
        """changeid is a revision number, node, or tag"""
        super(changectx, self).__init__(repo)
-
-        try:
-            if isinstance(changeid, int):
-                self._node = repo.changelog.node(changeid)
-                self._rev = changeid
-                return
-            elif changeid == 'null':
-                self._node = nullid
-                self._rev = nullrev
-                return
-            elif changeid == 'tip':
-                self._node = repo.changelog.tip()
-                self._rev = repo.changelog.rev(self._node)
-                return
-            elif (changeid == '.'
-                  or repo.local() and changeid == repo.dirstate.p1()):
-                # this is a hack to delay/avoid loading obsmarkers
-                # when we know that '.' won't be hidden
-                self._node = repo.dirstate.p1()
-                self._rev = repo.unfiltered().changelog.rev(self._node)
-                return
-            elif len(changeid) == 20:
-                try:
-                    self._node = changeid
-                    self._rev = repo.changelog.rev(changeid)
-                    return
-                except error.FilteredLookupError:
-                    changeid = hex(changeid) # for the error message
-                    raise
-                except LookupError:
-                    # check if it might have come from damaged dirstate
-                    #
-                    # XXX we could avoid the unfiltered if we had a recognizable
-                    # exception for filtered changeset access
-                    if (repo.local()
-                        and changeid in repo.unfiltered().dirstate.parents()):
-                        msg = _("working directory has unknown parent '%s'!")
-                        raise error.Abort(msg % short(changeid))
-                    changeid = hex(changeid) # for the error message
-
-            elif len(changeid) == 40:
-                try:
-                    self._node = bin(changeid)
-                    self._rev = repo.changelog.rev(self._node)
-                    return
-                except error.FilteredLookupError:
-                    raise
-                except LookupError:
-                    pass
-            else:
-                raise error.ProgrammingError(
-                    "unsupported changeid '%s' of type %s" %
-                    (changeid, type(changeid)))
-
-        except (error.FilteredIndexError, error.FilteredLookupError):
-            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
-                                                % pycompat.bytestr(changeid))
-        except IndexError:
-            pass
-        raise error.RepoLookupError(
-            _("unknown revision '%s'") % changeid)
+        self._rev = rev
+        self._node = node

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

616 class basefilectx(object):
556 class basefilectx(object):
617 """A filecontext object represents the common logic for its children:
557 """A filecontext object represents the common logic for its children:
618 filectx: read-only access to a filerevision that is already present
558 filectx: read-only access to a filerevision that is already present
619 in the repo,
559 in the repo,
620 workingfilectx: a filecontext that represents files from the working
560 workingfilectx: a filecontext that represents files from the working
621 directory,
561 directory,
622 memfilectx: a filecontext that represents files in-memory,
562 memfilectx: a filecontext that represents files in-memory,
623 """
563 """
624 @propertycache
564 @propertycache
625 def _filelog(self):
565 def _filelog(self):
626 return self._repo.file(self._path)
566 return self._repo.file(self._path)
627
567
628 @propertycache
568 @propertycache
629 def _changeid(self):
569 def _changeid(self):
630 if r'_changeid' in self.__dict__:
570 if r'_changeid' in self.__dict__:
631 return self._changeid
571 return self._changeid
632 elif r'_changectx' in self.__dict__:
572 elif r'_changectx' in self.__dict__:
633 return self._changectx.rev()
573 return self._changectx.rev()
634 elif r'_descendantrev' in self.__dict__:
574 elif r'_descendantrev' in self.__dict__:
635 # this file context was created from a revision with a known
575 # this file context was created from a revision with a known
636 # descendant, we can (lazily) correct for linkrev aliases
576 # descendant, we can (lazily) correct for linkrev aliases
637 return self._adjustlinkrev(self._descendantrev)
577 return self._adjustlinkrev(self._descendantrev)
638 else:
578 else:
639 return self._filelog.linkrev(self._filerev)
579 return self._filelog.linkrev(self._filerev)
640
580
641 @propertycache
581 @propertycache
642 def _filenode(self):
582 def _filenode(self):
643 if r'_fileid' in self.__dict__:
583 if r'_fileid' in self.__dict__:
644 return self._filelog.lookup(self._fileid)
584 return self._filelog.lookup(self._fileid)
645 else:
585 else:
646 return self._changectx.filenode(self._path)
586 return self._changectx.filenode(self._path)
647
587
648 @propertycache
588 @propertycache
649 def _filerev(self):
589 def _filerev(self):
650 return self._filelog.rev(self._filenode)
590 return self._filelog.rev(self._filenode)
651
591
652 @propertycache
592 @propertycache
653 def _repopath(self):
593 def _repopath(self):
654 return self._path
594 return self._path
655
595
656 def __nonzero__(self):
596 def __nonzero__(self):
657 try:
597 try:
658 self._filenode
598 self._filenode
659 return True
599 return True
660 except error.LookupError:
600 except error.LookupError:
661 # file is missing
601 # file is missing
662 return False
602 return False
663
603
664 __bool__ = __nonzero__
604 __bool__ = __nonzero__
665
605
666 def __bytes__(self):
606 def __bytes__(self):
667 try:
607 try:
668 return "%s@%s" % (self.path(), self._changectx)
608 return "%s@%s" % (self.path(), self._changectx)
669 except error.LookupError:
609 except error.LookupError:
670 return "%s@???" % self.path()
610 return "%s@???" % self.path()
671
611
672 __str__ = encoding.strmethod(__bytes__)
612 __str__ = encoding.strmethod(__bytes__)
673
613
674 def __repr__(self):
614 def __repr__(self):
675 return r"<%s %s>" % (type(self).__name__, str(self))
615 return r"<%s %s>" % (type(self).__name__, str(self))
676
616
677 def __hash__(self):
617 def __hash__(self):
678 try:
618 try:
679 return hash((self._path, self._filenode))
619 return hash((self._path, self._filenode))
680 except AttributeError:
620 except AttributeError:
681 return id(self)
621 return id(self)
682
622
683 def __eq__(self, other):
623 def __eq__(self, other):
684 try:
624 try:
685 return (type(self) == type(other) and self._path == other._path
625 return (type(self) == type(other) and self._path == other._path
686 and self._filenode == other._filenode)
626 and self._filenode == other._filenode)
687 except AttributeError:
627 except AttributeError:
688 return False
628 return False
689
629
690 def __ne__(self, other):
630 def __ne__(self, other):
691 return not (self == other)
631 return not (self == other)
692
632
693 def filerev(self):
633 def filerev(self):
694 return self._filerev
634 return self._filerev
695 def filenode(self):
635 def filenode(self):
696 return self._filenode
636 return self._filenode
697 @propertycache
637 @propertycache
698 def _flags(self):
638 def _flags(self):
699 return self._changectx.flags(self._path)
639 return self._changectx.flags(self._path)
700 def flags(self):
640 def flags(self):
701 return self._flags
641 return self._flags
702 def filelog(self):
642 def filelog(self):
703 return self._filelog
643 return self._filelog
704 def rev(self):
644 def rev(self):
705 return self._changeid
645 return self._changeid
706 def linkrev(self):
646 def linkrev(self):
707 return self._filelog.linkrev(self._filerev)
647 return self._filelog.linkrev(self._filerev)
708 def node(self):
648 def node(self):
709 return self._changectx.node()
649 return self._changectx.node()
710 def hex(self):
650 def hex(self):
711 return self._changectx.hex()
651 return self._changectx.hex()
712 def user(self):
652 def user(self):
713 return self._changectx.user()
653 return self._changectx.user()
714 def date(self):
654 def date(self):
715 return self._changectx.date()
655 return self._changectx.date()
716 def files(self):
656 def files(self):
717 return self._changectx.files()
657 return self._changectx.files()
718 def description(self):
658 def description(self):
719 return self._changectx.description()
659 return self._changectx.description()
720 def branch(self):
660 def branch(self):
721 return self._changectx.branch()
661 return self._changectx.branch()
722 def extra(self):
662 def extra(self):
723 return self._changectx.extra()
663 return self._changectx.extra()
724 def phase(self):
664 def phase(self):
725 return self._changectx.phase()
665 return self._changectx.phase()
726 def phasestr(self):
666 def phasestr(self):
727 return self._changectx.phasestr()
667 return self._changectx.phasestr()
728 def obsolete(self):
668 def obsolete(self):
729 return self._changectx.obsolete()
669 return self._changectx.obsolete()
730 def instabilities(self):
670 def instabilities(self):
731 return self._changectx.instabilities()
671 return self._changectx.instabilities()
732 def manifest(self):
672 def manifest(self):
733 return self._changectx.manifest()
673 return self._changectx.manifest()
734 def changectx(self):
674 def changectx(self):
735 return self._changectx
675 return self._changectx
736 def renamed(self):
676 def renamed(self):
737 return self._copied
677 return self._copied
738 def repo(self):
678 def repo(self):
739 return self._repo
679 return self._repo
740 def size(self):
680 def size(self):
741 return len(self.data())
681 return len(self.data())
742
682
743 def path(self):
683 def path(self):
744 return self._path
684 return self._path
745
685
746 def isbinary(self):
686 def isbinary(self):
747 try:
687 try:
748 return stringutil.binary(self.data())
688 return stringutil.binary(self.data())
749 except IOError:
689 except IOError:
750 return False
690 return False
751 def isexec(self):
691 def isexec(self):
752 return 'x' in self.flags()
692 return 'x' in self.flags()
753 def islink(self):
693 def islink(self):
754 return 'l' in self.flags()
694 return 'l' in self.flags()
755
695
756 def isabsent(self):
696 def isabsent(self):
757 """whether this filectx represents a file not in self._changectx
697 """whether this filectx represents a file not in self._changectx
758
698
759 This is mainly for merge code to detect change/delete conflicts. This is
699 This is mainly for merge code to detect change/delete conflicts. This is
760 expected to be True for all subclasses of basectx."""
700 expected to be True for all subclasses of basectx."""
761 return False
701 return False
762
702
763 _customcmp = False
703 _customcmp = False
764 def cmp(self, fctx):
704 def cmp(self, fctx):
765 """compare with other file context
705 """compare with other file context
766
706
767 returns True if different than fctx.
707 returns True if different than fctx.
768 """
708 """
769 if fctx._customcmp:
709 if fctx._customcmp:
770 return fctx.cmp(self)
710 return fctx.cmp(self)
771
711
772 if (fctx._filenode is None
712 if (fctx._filenode is None
773 and (self._repo._encodefilterpats
713 and (self._repo._encodefilterpats
774 # if file data starts with '\1\n', empty metadata block is
714 # if file data starts with '\1\n', empty metadata block is
775 # prepended, which adds 4 bytes to filelog.size().
715 # prepended, which adds 4 bytes to filelog.size().
776 or self.size() - 4 == fctx.size())
716 or self.size() - 4 == fctx.size())
777 or self.size() == fctx.size()):
717 or self.size() == fctx.size()):
778 return self._filelog.cmp(self._filenode, fctx.data())
718 return self._filelog.cmp(self._filenode, fctx.data())
779
719
780 return True
720 return True
781
721
782 def _adjustlinkrev(self, srcrev, inclusive=False):
722 def _adjustlinkrev(self, srcrev, inclusive=False):
783 """return the first ancestor of <srcrev> introducing <fnode>
723 """return the first ancestor of <srcrev> introducing <fnode>
784
724
785 If the linkrev of the file revision does not point to an ancestor of
725 If the linkrev of the file revision does not point to an ancestor of
786 srcrev, we'll walk down the ancestors until we find one introducing
726 srcrev, we'll walk down the ancestors until we find one introducing
787 this file revision.
727 this file revision.
788
728
789 :srcrev: the changeset revision we search ancestors from
729 :srcrev: the changeset revision we search ancestors from
790 :inclusive: if true, the src revision will also be checked
730 :inclusive: if true, the src revision will also be checked
791 """
731 """
792 repo = self._repo
732 repo = self._repo
793 cl = repo.unfiltered().changelog
733 cl = repo.unfiltered().changelog
794 mfl = repo.manifestlog
734 mfl = repo.manifestlog
795 # fetch the linkrev
735 # fetch the linkrev
796 lkr = self.linkrev()
736 lkr = self.linkrev()
797 # hack to reuse ancestor computation when searching for renames
737 # hack to reuse ancestor computation when searching for renames
798 memberanc = getattr(self, '_ancestrycontext', None)
738 memberanc = getattr(self, '_ancestrycontext', None)
799 iteranc = None
739 iteranc = None
800 if srcrev is None:
740 if srcrev is None:
801 # wctx case, used by workingfilectx during mergecopy
741 # wctx case, used by workingfilectx during mergecopy
802 revs = [p.rev() for p in self._repo[None].parents()]
742 revs = [p.rev() for p in self._repo[None].parents()]
803 inclusive = True # we skipped the real (revless) source
743 inclusive = True # we skipped the real (revless) source
804 else:
744 else:
805 revs = [srcrev]
745 revs = [srcrev]
806 if memberanc is None:
746 if memberanc is None:
807 memberanc = iteranc = cl.ancestors(revs, lkr,
747 memberanc = iteranc = cl.ancestors(revs, lkr,
808 inclusive=inclusive)
748 inclusive=inclusive)
809 # check if this linkrev is an ancestor of srcrev
749 # check if this linkrev is an ancestor of srcrev
810 if lkr not in memberanc:
750 if lkr not in memberanc:
811 if iteranc is None:
751 if iteranc is None:
812 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
752 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
813 fnode = self._filenode
753 fnode = self._filenode
814 path = self._path
754 path = self._path
815 for a in iteranc:
755 for a in iteranc:
816 ac = cl.read(a) # get changeset data (we avoid object creation)
756 ac = cl.read(a) # get changeset data (we avoid object creation)
817 if path in ac[3]: # checking the 'files' field.
757 if path in ac[3]: # checking the 'files' field.
818 # The file has been touched, check if the content is
758 # The file has been touched, check if the content is
819 # similar to the one we search for.
759 # similar to the one we search for.
820 if fnode == mfl[ac[0]].readfast().get(path):
760 if fnode == mfl[ac[0]].readfast().get(path):
821 return a
761 return a
822 # In theory, we should never get out of that loop without a result.
762 # In theory, we should never get out of that loop without a result.
823 # But if manifest uses a buggy file revision (not children of the
763 # But if manifest uses a buggy file revision (not children of the
824 # one it replaces) we could. Such a buggy situation will likely
764 # one it replaces) we could. Such a buggy situation will likely
825 # result is crash somewhere else at to some point.
765 # result is crash somewhere else at to some point.
826 return lkr
766 return lkr
827
767
828 def introrev(self):
768 def introrev(self):
829 """return the rev of the changeset which introduced this file revision
769 """return the rev of the changeset which introduced this file revision
830
770
831 This method is different from linkrev because it take into account the
771 This method is different from linkrev because it take into account the
832 changeset the filectx was created from. It ensures the returned
772 changeset the filectx was created from. It ensures the returned
833 revision is one of its ancestors. This prevents bugs from
773 revision is one of its ancestors. This prevents bugs from
834 'linkrev-shadowing' when a file revision is used by multiple
774 'linkrev-shadowing' when a file revision is used by multiple
835 changesets.
775 changesets.
836 """
776 """
837 lkr = self.linkrev()
777 lkr = self.linkrev()
838 attrs = vars(self)
778 attrs = vars(self)
839 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
779 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
840 if noctx or self.rev() == lkr:
780 if noctx or self.rev() == lkr:
841 return self.linkrev()
781 return self.linkrev()
842 return self._adjustlinkrev(self.rev(), inclusive=True)
782 return self._adjustlinkrev(self.rev(), inclusive=True)
843
783
844 def introfilectx(self):
784 def introfilectx(self):
845 """Return filectx having identical contents, but pointing to the
785 """Return filectx having identical contents, but pointing to the
846 changeset revision where this filectx was introduced"""
786 changeset revision where this filectx was introduced"""
847 introrev = self.introrev()
787 introrev = self.introrev()
848 if self.rev() == introrev:
788 if self.rev() == introrev:
849 return self
789 return self
850 return self.filectx(self.filenode(), changeid=introrev)
790 return self.filectx(self.filenode(), changeid=introrev)
851
791
852 def _parentfilectx(self, path, fileid, filelog):
792 def _parentfilectx(self, path, fileid, filelog):
853 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
793 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
854 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
794 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
855 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
795 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
856 # If self is associated with a changeset (probably explicitly
796 # If self is associated with a changeset (probably explicitly
857 # fed), ensure the created filectx is associated with a
797 # fed), ensure the created filectx is associated with a
858 # changeset that is an ancestor of self.changectx.
798 # changeset that is an ancestor of self.changectx.
859 # This lets us later use _adjustlinkrev to get a correct link.
799 # This lets us later use _adjustlinkrev to get a correct link.
860 fctx._descendantrev = self.rev()
800 fctx._descendantrev = self.rev()
861 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
801 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
862 elif r'_descendantrev' in vars(self):
802 elif r'_descendantrev' in vars(self):
863 # Otherwise propagate _descendantrev if we have one associated.
803 # Otherwise propagate _descendantrev if we have one associated.
864 fctx._descendantrev = self._descendantrev
804 fctx._descendantrev = self._descendantrev
865 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
805 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
866 return fctx
806 return fctx
867
807
868 def parents(self):
808 def parents(self):
869 _path = self._path
809 _path = self._path
870 fl = self._filelog
810 fl = self._filelog
871 parents = self._filelog.parents(self._filenode)
811 parents = self._filelog.parents(self._filenode)
872 pl = [(_path, node, fl) for node in parents if node != nullid]
812 pl = [(_path, node, fl) for node in parents if node != nullid]
873
813
874 r = fl.renamed(self._filenode)
814 r = fl.renamed(self._filenode)
875 if r:
815 if r:
876 # - In the simple rename case, both parent are nullid, pl is empty.
816 # - In the simple rename case, both parent are nullid, pl is empty.
877 # - In case of merge, only one of the parent is null id and should
817 # - In case of merge, only one of the parent is null id and should
878 # be replaced with the rename information. This parent is -always-
818 # be replaced with the rename information. This parent is -always-
879 # the first one.
819 # the first one.
880 #
820 #
881 # As null id have always been filtered out in the previous list
821 # As null id have always been filtered out in the previous list
882 # comprehension, inserting to 0 will always result in "replacing
822 # comprehension, inserting to 0 will always result in "replacing
883 # first nullid parent with rename information.
823 # first nullid parent with rename information.
884 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
824 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
885
825
886 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
826 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
887
827
888 def p1(self):
828 def p1(self):
889 return self.parents()[0]
829 return self.parents()[0]
890
830
891 def p2(self):
831 def p2(self):
892 p = self.parents()
832 p = self.parents()
893 if len(p) == 2:
833 if len(p) == 2:
894 return p[1]
834 return p[1]
895 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
835 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if r'_filelog' not in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

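    # Illustrative usage sketch (assumptions: `repo` is a localrepo handle
    # obtained elsewhere; the file name and `handle` are hypothetical):
    #
    #   fctx = repo['tip']['README']
    #   for line in fctx.annotate(follow=True):
    #       # line.fctx.rev() identifies the revision that last touched it
    #       handle(line.lineno, line.text)
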
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
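            # pop the candidate with the largest (linkrev, filenode) key so
            # ancestors are yielded newest-first; linkrev order serves as a
            # cheap stand-in for reverse topological order here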
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build a `changectx` for a filtered revision. In such a case we
            # fall back to creating a `changectx` on the unfiltered version
            # of the repository. This fallback should not be an issue because
            # `changectx` objects from `filectx` are not used in complex
            # operations that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However,
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than a crash.
            #
            # Linkrevs have several serious problems with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solutions to the linkrev issues are on the
            # table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
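            # reading a censored revision normally aborts; setting
            # censor.policy=ignore opts into getting empty data instead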
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or if both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
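                # reconciliation sketch: an unchanged side yields the other
                # side's flag:
                #   fl1 == fl2  -> fl1  (both sides agree)
                #   fl1 == fla  -> fl2  (only p2 changed the flag)
                #   fl2 == fla  -> fl1  (only p1 changed the flag)
                #   otherwise   -> ''   (a real conflict: punt)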
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed.  For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
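        # the dirstate always reports two parent slots; the second one is
        # nullid unless a merge is in progress, so drop it in the common case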
        if p[1] == nullid:
            p = p[:-1]
        return [self._repo[x] for x in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
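                # the warning below uses a rough heuristic: managing a large
                # file may take about three times its size in RAM, reported
                # here in MB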
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

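    # A note on _checklookup()'s return value, inferred from its callers:
    # `modified` and `deleted` feed straight into the status result, while
    # `fixup` lists files whose contents proved clean but whose stale
    # dirstate entries may be refreshed by _poststatusfixup() below.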
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

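    # Illustrative shape of the resulting manifest (hypothetical paths):
    # clean entries keep their parent nodeid, while dirty ones carry
    # sentinel ids so manifest comparisons see them as different:
    #
    #   man['added.txt']   == addednodeid     # added in the working dir
    #   man['changed.txt'] == modifiednodeid  # modified locally
    #   'removed.txt' not in man              # removed or deleted
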
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent: a new manifest
        is only built when self (the working directory) is compared against
        something other than its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
                                   rmdir=rmdir)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)

class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` and `date` must be non-None. If it is
    `False`, the file was deleted.
    """

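    # An illustrative cache entry (hypothetical path and values) for a file
    # that was written in memory:
    #
    #   self._cache['foo.txt'] = {
    #       'exists': True,
    #       'date': dateutil.makedate(),
    #       'data': 'new contents',
    #       'flags': '',
    #       'copied': None,
    #   }
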
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

1836
1776
1837 def isinmemory(self):
1777 def isinmemory(self):
1838 return True
1778 return True
1839
1779
1840 def filedate(self, path):
1780 def filedate(self, path):
1841 if self.isdirty(path):
1781 if self.isdirty(path):
1842 return self._cache[path]['date']
1782 return self._cache[path]['date']
1843 else:
1783 else:
1844 return self._wrappedctx[path].date()
1784 return self._wrappedctx[path].date()
1845
1785
1846 def markcopied(self, path, origin):
1786 def markcopied(self, path, origin):
1847 if self.isdirty(path):
1787 if self.isdirty(path):
1848 self._cache[path]['copied'] = origin
1788 self._cache[path]['copied'] = origin
1849 else:
1789 else:
1850 raise error.ProgrammingError('markcopied() called on clean context')
1790 raise error.ProgrammingError('markcopied() called on clean context')
1851
1791
1852 def copydata(self, path):
1792 def copydata(self, path):
1853 if self.isdirty(path):
1793 if self.isdirty(path):
1854 return self._cache[path]['copied']
1794 return self._cache[path]['copied']
1855 else:
1795 else:
1856 raise error.ProgrammingError('copydata() called on clean context')
1796 raise error.ProgrammingError('copydata() called on clean context')
1857
1797
    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1() and self._cache[component]['exists']:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if self._cache[m]['exists']]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              "'%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                    'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                        underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data') or self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)

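# Illustrative sketch, not part of the original module: one plausible way a
# caller (e.g. an in-memory rebase step) drives ``overlayworkingctx``. The
# repo, file name and commit message are made-up assumptions of the example.
def _exampleoverlayusage(repo):
    wctx = overlayworkingctx(repo)
    wctx.setbase(repo['.'])                    # wrap the working parent
    wctx.write('foo.txt', 'new contents\n')    # buffered in wctx._cache only
    assert wctx.isdirty('foo.txt')
    # The buffered writes become a commitable in-memory context; nothing has
    # touched the working directory or the store up to this point.
    return wctx.tomemctx('example commit message')
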
class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx`` and intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

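# Illustrative sketch, not part of the original module: exercising the
# path-keyed cache above. ``countingfilectxfn`` is a made-up stand-in for a
# real filectxfn; keying only on ``path`` is what avoids the repo/memctx
# reference cycle mentioned in the docstring.
def _examplecachingusage(repo, memctx):
    calls = []
    def countingfilectxfn(repo, memctx, path):
        calls.append(path)
        return None  # pretend every requested file was removed
    cached = makecachingfilectxfn(countingfilectxfn)
    cached(repo, memctx, 'a.txt')
    cached(repo, memctx, 'a.txt')
    assert calls == ['a.txt']  # the second call was served from the cache
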
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx

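# Illustrative sketch, not part of the original module: the minimal interface
# ``memfilefrompatch`` expects from its ``patchstore`` argument -- a
# ``getfile(path)`` returning ``(data, (islink, isexec), copysource)``, with
# ``data is None`` meaning the file was deleted. This toy store is made up.
class _exampledictstore(object):
    def __init__(self, files):
        self._files = files  # path -> (data, (islink, isexec), copysource)

    def getfile(self, path):
        return self._files.get(path, (None, (False, False), None))
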
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related files' data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized as a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

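# Illustrative sketch, not part of the original module: about the smallest
# useful filectxfn, paired with the memctx docstring above. The parents, file
# names and message are made-up assumptions; ``repo`` is a loaded localrepo.
def _examplememctx(repo):
    def filectxfn(repo, memctx, path):
        if path == 'removed.txt':
            return None  # registers a deletion (see the memctx docstring)
        return memfilectx(repo, memctx, path, 'contents of %s\n' % path)
    ctx = memctx(repo, (repo['.'].node(), None), 'example message',
                 ['new.txt', 'removed.txt'], filectxfn)
    return ctx.commit()  # hands the context to repo.commitctx()
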
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        if islink:
            self._flags = 'l'
        elif isexec:
            self._flags = 'x'
        else:
            self._flags = ''
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data


class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized as a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

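# Illustrative sketch, not part of the original module: using
# ``metadataonlyctx`` the way history-rewriting code might, to replace only a
# commit message while reusing the original manifest. ``repo``, ``rev`` and
# ``newtext`` are assumptions of the example.
def _examplereword(repo, rev, newtext):
    old = repo[rev]
    new = metadataonlyctx(repo, old, text=newtext, user=old.user(),
                          date=old.date(), extra=old.extra())
    return new.commit()
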
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)
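
# Illustrative sketch, not part of the original module: comparing a file at an
# arbitrary on-disk location against a revisioned file, roughly the way
# contrib/simplemerge drives this class. The paths are made up.
def _examplearbitrarycmp(repo):
    local = arbitraryfilectx('/tmp/scratch/file.txt', repo=repo)
    other = repo['.']['file.txt']
    return local.cmp(other)  # True if the contents differ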
@@ -1,2941 +1,3001 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

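# Illustrative sketch, not part of the original module: how the two
# subclasses above are typically applied. The decorator argument names the
# file whose state invalidates the cached property; only ``join()`` differs
# between them. The class and property bodies here are made-up placeholders.
class _examplecacheduser(object):
    @repofilecache('bookmarks')       # resolved through obj.vfs.join()
    def _bookmarks(self):
        return {}                     # placeholder for parsing .hg/bookmarks

    @storecache('00changelog.i')      # resolved through obj.sjoin()
    def changelog(self):
        return None                   # placeholder for opening the changelog
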
def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on an unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

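# Illustrative sketch, not part of the original module: what
# ``unfilteredmethod`` guarantees to the decorated method. The toy class and
# method name are made up; a real repo's unfiltered() returns the unfiltered
# repoview.
class _examplerepoapi(object):
    def unfiltered(self):
        return self

    @unfilteredmethod
    def _destroying(self):
        # ``self`` here is always the unfiltered repo, regardless of which
        # filtered view the caller invoked the method on.
        return self
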
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

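# Illustrative sketch, not part of the original module: the command-executor
# protocol from the caller's side. ``peer`` is assumed to be a localpeer;
# 'heads' is one of the commands the peer exposes.
def _exampleexecutorusage(peer):
    with localcommandexecutor(peer) as e:
        f = e.callcommand('heads', {})
        e.sendcommands()    # a no-op locally, but required by the interface
    return f.result()       # the future is already resolved for local peers
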
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
295 # This is a bundle20 object, turn it into an unbundler.
294 # This little dance should be dropped eventually when the
296 # This little dance should be dropped eventually when the
295 # API is finally improved.
297 # API is finally improved.
296 stream = util.chunkbuffer(ret.getchunks())
298 stream = util.chunkbuffer(ret.getchunks())
297 ret = bundle2.getunbundler(self.ui, stream)
299 ret = bundle2.getunbundler(self.ui, stream)
298 return ret
300 return ret
299 except Exception as exc:
301 except Exception as exc:
300 # If the exception contains output salvaged from a bundle2
302 # If the exception contains output salvaged from a bundle2
301 # reply, we need to make sure it is printed before continuing
303 # reply, we need to make sure it is printed before continuing
302 # to fail. So we build a bundle2 with such output and consume
304 # to fail. So we build a bundle2 with such output and consume
303 # it directly.
305 # it directly.
304 #
306 #
305 # This is not very elegant but allows a "simple" solution for
307 # This is not very elegant but allows a "simple" solution for
306 # issue4594
308 # issue4594
307 output = getattr(exc, '_bundle2salvagedoutput', ())
309 output = getattr(exc, '_bundle2salvagedoutput', ())
308 if output:
310 if output:
309 bundler = bundle2.bundle20(self._repo.ui)
311 bundler = bundle2.bundle20(self._repo.ui)
310 for out in output:
312 for out in output:
311 bundler.addpart(out)
313 bundler.addpart(out)
312 stream = util.chunkbuffer(bundler.getchunks())
314 stream = util.chunkbuffer(bundler.getchunks())
313 b = bundle2.getunbundler(self.ui, stream)
315 b = bundle2.getunbundler(self.ui, stream)
314 bundle2.processbundle(self._repo, b)
316 bundle2.processbundle(self._repo, b)
315 raise
317 raise
316 except error.PushRaced as exc:
318 except error.PushRaced as exc:
317 raise error.ResponseError(_('push failed:'),
319 raise error.ResponseError(_('push failed:'),
318 stringutil.forcebytestr(exc))
320 stringutil.forcebytestr(exc))
319
321
320 # End of _basewirecommands interface.
322 # End of _basewirecommands interface.
321
323
322 # Begin of peer interface.
324 # Begin of peer interface.
323
325
324 def commandexecutor(self):
326 def commandexecutor(self):
325 return localcommandexecutor(self)
327 return localcommandexecutor(self)
326
328
327 # End of peer interface.
329 # End of peer interface.
328
330
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

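# Example (hypothetical extension module ``myext``): an extension that knows
# how to open repositories carrying an extra requirement could register:
#
#     def featuresetup(ui, supported):
#         supported.add(b'myext-requirement')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
#
# Only functions defined in loaded extension modules are consulted (see
# ``gathersupportedrequirements()`` below).
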
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        # Run this before extensions.loadall() so extensions can be
        # automatically enabled.
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn(ui=ui,
                 intents=intents,
                 requirements=requirements,
                 features=features,
                 wdirvfs=wdirvfs,
                 hgvfs=hgvfs,
                 store=store,
                 storevfs=storevfs,
                 storeoptions=storevfs.options,
                 cachevfs=cachevfs,
                 extensionmodulenames=extensionmodulenames,
                 extrastate=extrastate,
                 baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        features=features,
        intents=intents)

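# Example (hypothetical): per the docstring above, an extension customizes
# repository type creation by wrapping one of the factory functions named in
# ``REPO_INTERFACES``. A sketch, with ``myextmixin`` standing in for the
# extension's own class:
#
#     def wrapmakefilestorage(orig, requirements, features, **kwargs):
#         cls = orig(requirements=requirements, features=features, **kwargs)
#         if b'myext-requirement' in requirements:
#             class myextfilestorage(myextmixin, cls):
#                 pass
#             return myextfilestorage
#         return cls
#
#     extensions.wrapfunction(localrepo, 'makefilestorage',
#                             wrapmakefilestorage)
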
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

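# For instance, opening a repository whose ``.hg/requires`` lists ``lfs``
# behaves as if the repository's hgrc contained:
#
#     [extensions]
#     lfs =
#
# unless the user has already configured the extension explicitly.
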
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives the set of requirements read from the repository and the set
    of requirements the running code supports. Raises an ``error.RepoError``
    if there exists any requirement in the former set that currently loaded
    code doesn't recognize.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, makemain),
    (repository.ilocalrepositoryfilestorage, makefilestorage),
]

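# Illustration: for a repository with typical requirements, the loop in
# ``makelocalrepository()`` collects ``localrepository`` (from ``makemain()``)
# and ``revlogfilestorage`` (from ``makefilestorage()``) as bases, so the
# instantiated type is roughly equivalent to:
#
#     type('derivedrepo:/path/to/repo<fncache,revlogv1,store,...>',
#          (localrepository, revlogfilestorage), {})
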
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # List of prefixes for files which can be written without 'wlock'.
    # Extensions should extend this list when needed.
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

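    # Example (hypothetical): since this is a class-level set, an extension
    # maintaining its own lock-free state file could extend it during setup:
    #
    #     localrepo.localrepository._wlockfreeprefix.add('myext-state')
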
    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

1046 def close(self):
1048 def close(self):
1047 self._writecaches()
1049 self._writecaches()
1048
1050
1049 def _writecaches(self):
1051 def _writecaches(self):
1050 if self._revbranchcache:
1052 if self._revbranchcache:
1051 self._revbranchcache.write()
1053 self._revbranchcache.write()
1052
1054
1053 def _restrictcapabilities(self, caps):
1055 def _restrictcapabilities(self, caps):
1054 if self.ui.configbool('experimental', 'bundle2-advertise'):
1056 if self.ui.configbool('experimental', 'bundle2-advertise'):
1055 caps = set(caps)
1057 caps = set(caps)
1056 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1058 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1057 role='client'))
1059 role='client'))
1058 caps.add('bundle2=' + urlreq.quote(capsblob))
1060 caps.add('bundle2=' + urlreq.quote(capsblob))
1059 return caps
1061 return caps
1060
1062
1061 def _writerequirements(self):
1063 def _writerequirements(self):
1062 scmutil.writerequires(self.vfs, self.requirements)
1064 scmutil.writerequires(self.vfs, self.requirements)
1063
1065
1064 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1066 # Don't cache auditor/nofsauditor, or you'll end up with a reference cycle:
1065 # self -> auditor -> self._checknested -> self
1067 # self -> auditor -> self._checknested -> self
1066
1068
1067 @property
1069 @property
1068 def auditor(self):
1070 def auditor(self):
1069 # This is only used by context.workingctx.match in order to
1071 # This is only used by context.workingctx.match in order to
1070 # detect files in subrepos.
1072 # detect files in subrepos.
1071 return pathutil.pathauditor(self.root, callback=self._checknested)
1073 return pathutil.pathauditor(self.root, callback=self._checknested)
1072
1074
1073 @property
1075 @property
1074 def nofsauditor(self):
1076 def nofsauditor(self):
1075 # This is only used by context.basectx.match in order to detect
1077 # This is only used by context.basectx.match in order to detect
1076 # files in subrepos.
1078 # files in subrepos.
1077 return pathutil.pathauditor(self.root, callback=self._checknested,
1079 return pathutil.pathauditor(self.root, callback=self._checknested,
1078 realfs=False, cached=True)
1080 realfs=False, cached=True)
1079
1081
1080 def _checknested(self, path):
1082 def _checknested(self, path):
1081 """Determine if path is a legal nested repository."""
1083 """Determine if path is a legal nested repository."""
1082 if not path.startswith(self.root):
1084 if not path.startswith(self.root):
1083 return False
1085 return False
1084 subpath = path[len(self.root) + 1:]
1086 subpath = path[len(self.root) + 1:]
1085 normsubpath = util.pconvert(subpath)
1087 normsubpath = util.pconvert(subpath)
1086
1088
1087 # XXX: Checking against the current working copy is wrong in
1089 # XXX: Checking against the current working copy is wrong in
1088 # the sense that it can reject things like
1090 # the sense that it can reject things like
1089 #
1091 #
1090 # $ hg cat -r 10 sub/x.txt
1092 # $ hg cat -r 10 sub/x.txt
1091 #
1093 #
1092 # if sub/ is no longer a subrepository in the working copy
1094 # if sub/ is no longer a subrepository in the working copy
1093 # parent revision.
1095 # parent revision.
1094 #
1096 #
1095 # However, it can of course also allow things that would have
1097 # However, it can of course also allow things that would have
1096 # been rejected before, such as the above cat command if sub/
1098 # been rejected before, such as the above cat command if sub/
1097 # is a subrepository now, but was a normal directory before.
1099 # is a subrepository now, but was a normal directory before.
1098 # The old path auditor would have rejected it by mistake since it
1100 # The old path auditor would have rejected it by mistake since it
1099 # panics when it sees sub/.hg/.
1101 # panics when it sees sub/.hg/.
1100 #
1102 #
1101 # All in all, checking against the working copy seems sensible
1103 # All in all, checking against the working copy seems sensible
1102 # since we want to prevent access to nested repositories on
1104 # since we want to prevent access to nested repositories on
1103 # the filesystem *now*.
1105 # the filesystem *now*.
1104 ctx = self[None]
1106 ctx = self[None]
1105 parts = util.splitpath(subpath)
1107 parts = util.splitpath(subpath)
1106 while parts:
1108 while parts:
1107 prefix = '/'.join(parts)
1109 prefix = '/'.join(parts)
1108 if prefix in ctx.substate:
1110 if prefix in ctx.substate:
1109 if prefix == normsubpath:
1111 if prefix == normsubpath:
1110 return True
1112 return True
1111 else:
1113 else:
1112 sub = ctx.sub(prefix)
1114 sub = ctx.sub(prefix)
1113 return sub.checknested(subpath[len(prefix) + 1:])
1115 return sub.checknested(subpath[len(prefix) + 1:])
1114 else:
1116 else:
1115 parts.pop()
1117 parts.pop()
1116 return False
1118 return False
1117
1119
1118 def peer(self):
1120 def peer(self):
1119 return localpeer(self) # not cached to avoid reference cycle
1121 return localpeer(self) # not cached to avoid reference cycle
1120
1122
1121 def unfiltered(self):
1123 def unfiltered(self):
1122 """Return unfiltered version of the repository
1124 """Return unfiltered version of the repository
1123
1125
1124 Intended to be overridden by filtered repo."""
1126 Intended to be overridden by filtered repo."""
1125 return self
1127 return self
1126
1128
1127 def filtered(self, name, visibilityexceptions=None):
1129 def filtered(self, name, visibilityexceptions=None):
1128 """Return a filtered version of a repository"""
1130 """Return a filtered version of a repository"""
1129 cls = repoview.newtype(self.unfiltered().__class__)
1131 cls = repoview.newtype(self.unfiltered().__class__)
1130 return cls(self, name, visibilityexceptions)
1132 return cls(self, name, visibilityexceptions)
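A usage note: the filter names are the standard repoview ones; for example (illustrative, assuming `repo` is a localrepository):

    visible = repo.filtered('visible')  # hide hidden (e.g. obsolete) changesets
    served = repo.filtered('served')    # additionally hide secret changesets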
1131
1133
1132 @repofilecache('bookmarks', 'bookmarks.current')
1134 @repofilecache('bookmarks', 'bookmarks.current')
1133 def _bookmarks(self):
1135 def _bookmarks(self):
1134 return bookmarks.bmstore(self)
1136 return bookmarks.bmstore(self)
1135
1137
1136 @property
1138 @property
1137 def _activebookmark(self):
1139 def _activebookmark(self):
1138 return self._bookmarks.active
1140 return self._bookmarks.active
1139
1141
1140 # _phasesets depend on changelog. what we need is to call
1142 # _phasesets depend on changelog. what we need is to call
1141 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1143 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1142 # can't be easily expressed in filecache mechanism.
1144 # can't be easily expressed in filecache mechanism.
1143 @storecache('phaseroots', '00changelog.i')
1145 @storecache('phaseroots', '00changelog.i')
1144 def _phasecache(self):
1146 def _phasecache(self):
1145 return phases.phasecache(self, self._phasedefaults)
1147 return phases.phasecache(self, self._phasedefaults)
1146
1148
1147 @storecache('obsstore')
1149 @storecache('obsstore')
1148 def obsstore(self):
1150 def obsstore(self):
1149 return obsolete.makestore(self.ui, self)
1151 return obsolete.makestore(self.ui, self)
1150
1152
1151 @storecache('00changelog.i')
1153 @storecache('00changelog.i')
1152 def changelog(self):
1154 def changelog(self):
1153 return changelog.changelog(self.svfs,
1155 return changelog.changelog(self.svfs,
1154 trypending=txnutil.mayhavepending(self.root))
1156 trypending=txnutil.mayhavepending(self.root))
1155
1157
1156 @storecache('00manifest.i')
1158 @storecache('00manifest.i')
1157 def manifestlog(self):
1159 def manifestlog(self):
1158 rootstore = manifest.manifestrevlog(self.svfs)
1160 rootstore = manifest.manifestrevlog(self.svfs)
1159 return manifest.manifestlog(self.svfs, self, rootstore)
1161 return manifest.manifestlog(self.svfs, self, rootstore)
1160
1162
1161 @repofilecache('dirstate')
1163 @repofilecache('dirstate')
1162 def dirstate(self):
1164 def dirstate(self):
1163 return self._makedirstate()
1165 return self._makedirstate()
1164
1166
1165 def _makedirstate(self):
1167 def _makedirstate(self):
1166 """Extension point for wrapping the dirstate per-repo."""
1168 """Extension point for wrapping the dirstate per-repo."""
1167 sparsematchfn = lambda: sparse.matcher(self)
1169 sparsematchfn = lambda: sparse.matcher(self)
1168
1170
1169 return dirstate.dirstate(self.vfs, self.ui, self.root,
1171 return dirstate.dirstate(self.vfs, self.ui, self.root,
1170 self._dirstatevalidate, sparsematchfn)
1172 self._dirstatevalidate, sparsematchfn)
1171
1173
1172 def _dirstatevalidate(self, node):
1174 def _dirstatevalidate(self, node):
1173 try:
1175 try:
1174 self.changelog.rev(node)
1176 self.changelog.rev(node)
1175 return node
1177 return node
1176 except error.LookupError:
1178 except error.LookupError:
1177 if not self._dirstatevalidatewarned:
1179 if not self._dirstatevalidatewarned:
1178 self._dirstatevalidatewarned = True
1180 self._dirstatevalidatewarned = True
1179 self.ui.warn(_("warning: ignoring unknown"
1181 self.ui.warn(_("warning: ignoring unknown"
1180 " working parent %s!\n") % short(node))
1182 " working parent %s!\n") % short(node))
1181 return nullid
1183 return nullid
1182
1184
1183 @storecache(narrowspec.FILENAME)
1185 @storecache(narrowspec.FILENAME)
1184 def narrowpats(self):
1186 def narrowpats(self):
1185 """matcher patterns for this repository's narrowspec
1187 """matcher patterns for this repository's narrowspec
1186
1188
1187 A tuple of (includes, excludes).
1189 A tuple of (includes, excludes).
1188 """
1190 """
1189 return narrowspec.load(self)
1191 return narrowspec.load(self)
1190
1192
1191 @storecache(narrowspec.FILENAME)
1193 @storecache(narrowspec.FILENAME)
1192 def _narrowmatch(self):
1194 def _narrowmatch(self):
1193 if repository.NARROW_REQUIREMENT not in self.requirements:
1195 if repository.NARROW_REQUIREMENT not in self.requirements:
1194 return matchmod.always(self.root, '')
1196 return matchmod.always(self.root, '')
1195 include, exclude = self.narrowpats
1197 include, exclude = self.narrowpats
1196 return narrowspec.match(self.root, include=include, exclude=exclude)
1198 return narrowspec.match(self.root, include=include, exclude=exclude)
1197
1199
1198 # TODO(martinvonz): make this property-like instead?
1200 # TODO(martinvonz): make this property-like instead?
1199 def narrowmatch(self):
1201 def narrowmatch(self):
1200 return self._narrowmatch
1202 return self._narrowmatch
1201
1203
1202 def setnarrowpats(self, newincludes, newexcludes):
1204 def setnarrowpats(self, newincludes, newexcludes):
1203 narrowspec.save(self, newincludes, newexcludes)
1205 narrowspec.save(self, newincludes, newexcludes)
1204 self.invalidate(clearfilecache=True)
1206 self.invalidate(clearfilecache=True)
1205
1207
1206 def __getitem__(self, changeid):
1208 def __getitem__(self, changeid):
1207 if changeid is None:
1209 if changeid is None:
1208 return context.workingctx(self)
1210 return context.workingctx(self)
1209 if isinstance(changeid, context.basectx):
1211 if isinstance(changeid, context.basectx):
1210 return changeid
1212 return changeid
1211 if isinstance(changeid, slice):
1213 if isinstance(changeid, slice):
1212 # wdirrev isn't contiguous so the slice shouldn't include it
1214 # wdirrev isn't contiguous so the slice shouldn't include it
1213 return [self[i]
1215 return [self[i]
1214 for i in pycompat.xrange(*changeid.indices(len(self)))
1216 for i in pycompat.xrange(*changeid.indices(len(self)))
1215 if i not in self.changelog.filteredrevs]
1217 if i not in self.changelog.filteredrevs]
1216 try:
1218 try:
1217 return context.changectx(self, changeid)
1219 if isinstance(changeid, int):
1220 node = self.changelog.node(changeid)
1221 rev = changeid
1222 return context.changectx(self, rev, node)
1223 elif changeid == 'null':
1224 node = nullid
1225 rev = nullrev
1226 return context.changectx(self, rev, node)
1227 elif changeid == 'tip':
1228 node = self.changelog.tip()
1229 rev = self.changelog.rev(node)
1230 return context.changectx(self, rev, node)
1231 elif (changeid == '.'
1232 or self.local() and changeid == self.dirstate.p1()):
1233 # this is a hack to delay/avoid loading obsmarkers
1234 # when we know that '.' won't be hidden
1235 node = self.dirstate.p1()
1236 rev = self.unfiltered().changelog.rev(node)
1237 return context.changectx(self, rev, node)
1238 elif len(changeid) == 20:
1239 try:
1240 node = changeid
1241 rev = self.changelog.rev(changeid)
1242 return context.changectx(self, rev, node)
1243 except error.FilteredLookupError:
1244 changeid = hex(changeid) # for the error message
1245 raise
1246 except LookupError:
1247 # check if it might have come from damaged dirstate
1248 #
1249 # XXX we could avoid the unfiltered if we had a recognizable
1250 # exception for filtered changeset access
1251 if (self.local()
1252 and changeid in self.unfiltered().dirstate.parents()):
1253 msg = _("working directory has unknown parent '%s'!")
1254 raise error.Abort(msg % short(changeid))
1255 changeid = hex(changeid) # for the error message
1256
1257 elif len(changeid) == 40:
1258 try:
1259 node = bin(changeid)
1260 rev = self.changelog.rev(node)
1261 return context.changectx(self, rev, node)
1262 except error.FilteredLookupError:
1263 raise
1264 except LookupError:
1265 pass
1266 else:
1267 raise error.ProgrammingError(
1268 "unsupported changeid '%s' of type %s" %
1269 (changeid, type(changeid)))
1270
1271 except (error.FilteredIndexError, error.FilteredLookupError):
1272 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1273 % pycompat.bytestr(changeid))
1274 except IndexError:
1275 pass
1218 except error.WdirUnsupported:
1276 except error.WdirUnsupported:
1219 return context.workingctx(self)
1277 return context.workingctx(self)
1278 raise error.RepoLookupError(
1279 _("unknown revision '%s'") % changeid)
1220
1280
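For reference, the lookup forms handled inline by the rewritten __getitem__ (a sketch, assuming a local repository object `repo` and an existing context `ctx`):

    wctx = repo[None]        # working directory context
    ctx0 = repo[0]           # integer revision
    tip = repo['tip']        # 'tip', 'null' and '.' are special-cased
    p1 = repo['.']           # dirstate parent, avoiding obsmarker loading
    same = repo[ctx.node()]  # 20-byte binary node
    same = repo[ctx.hex()]   # 40-character hex node
    first = repo[:5]         # slices skip filtered revisions
    # any other string raises ProgrammingError; symbol resolution lives in
    # scmutil.revsymbol()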
1221 def __contains__(self, changeid):
1281 def __contains__(self, changeid):
1222 """True if the given changeid exists
1282 """True if the given changeid exists
1223
1283
1224 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1284 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1225 specified.
1285 specified.
1226 """
1286 """
1227 try:
1287 try:
1228 self[changeid]
1288 self[changeid]
1229 return True
1289 return True
1230 except error.RepoLookupError:
1290 except error.RepoLookupError:
1231 return False
1291 return False
1232
1292
1233 def __nonzero__(self):
1293 def __nonzero__(self):
1234 return True
1294 return True
1235
1295
1236 __bool__ = __nonzero__
1296 __bool__ = __nonzero__
1237
1297
1238 def __len__(self):
1298 def __len__(self):
1239 # no need to pay the cost of repoview.changelog
1299 # no need to pay the cost of repoview.changelog
1240 unfi = self.unfiltered()
1300 unfi = self.unfiltered()
1241 return len(unfi.changelog)
1301 return len(unfi.changelog)
1242
1302
1243 def __iter__(self):
1303 def __iter__(self):
1244 return iter(self.changelog)
1304 return iter(self.changelog)
1245
1305
1246 def revs(self, expr, *args):
1306 def revs(self, expr, *args):
1247 '''Find revisions matching a revset.
1307 '''Find revisions matching a revset.
1248
1308
1249 The revset is specified as a string ``expr`` that may contain
1309 The revset is specified as a string ``expr`` that may contain
1250 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1310 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1251
1311
1252 Revset aliases from the configuration are not expanded. To expand
1312 Revset aliases from the configuration are not expanded. To expand
1253 user aliases, consider calling ``scmutil.revrange()`` or
1313 user aliases, consider calling ``scmutil.revrange()`` or
1254 ``repo.anyrevs([expr], user=True)``.
1314 ``repo.anyrevs([expr], user=True)``.
1255
1315
1256 Returns a revset.abstractsmartset, which is a list-like interface
1316 Returns a revset.abstractsmartset, which is a list-like interface
1257 that contains integer revisions.
1317 that contains integer revisions.
1258 '''
1318 '''
1259 expr = revsetlang.formatspec(expr, *args)
1319 expr = revsetlang.formatspec(expr, *args)
1260 m = revset.match(None, expr)
1320 m = revset.match(None, expr)
1261 return m(self)
1321 return m(self)
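For example (illustrative; `repo` and `rev` are assumed to exist):

    # %d escapes an integer revision; revsetlang.formatspec documents the
    # other conversions.
    for r in repo.revs('ancestors(%d) and merge()', rev):
        repo.ui.write('%d\n' % r)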
1262
1322
1263 def set(self, expr, *args):
1323 def set(self, expr, *args):
1264 '''Find revisions matching a revset and emit changectx instances.
1324 '''Find revisions matching a revset and emit changectx instances.
1265
1325
1266 This is a convenience wrapper around ``revs()`` that iterates the
1326 This is a convenience wrapper around ``revs()`` that iterates the
1267 result and is a generator of changectx instances.
1327 result and is a generator of changectx instances.
1268
1328
1269 Revset aliases from the configuration are not expanded. To expand
1329 Revset aliases from the configuration are not expanded. To expand
1270 user aliases, consider calling ``scmutil.revrange()``.
1330 user aliases, consider calling ``scmutil.revrange()``.
1271 '''
1331 '''
1272 for r in self.revs(expr, *args):
1332 for r in self.revs(expr, *args):
1273 yield self[r]
1333 yield self[r]
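Typical use (illustrative):

    for ctx in repo.set('head() and not closed()'):
        repo.ui.write('%s\n' % ctx)  # changectx objects, not bare revisions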
1274
1334
1275 def anyrevs(self, specs, user=False, localalias=None):
1335 def anyrevs(self, specs, user=False, localalias=None):
1276 '''Find revisions matching one of the given revsets.
1336 '''Find revisions matching one of the given revsets.
1277
1337
1278 Revset aliases from the configuration are not expanded by default. To
1338 Revset aliases from the configuration are not expanded by default. To
1279 expand user aliases, specify ``user=True``. To provide some local
1339 expand user aliases, specify ``user=True``. To provide some local
1280 definitions overriding user aliases, set ``localalias`` to
1340 definitions overriding user aliases, set ``localalias`` to
1281 ``{name: definitionstring}``.
1341 ``{name: definitionstring}``.
1282 '''
1342 '''
1283 if user:
1343 if user:
1284 m = revset.matchany(self.ui, specs,
1344 m = revset.matchany(self.ui, specs,
1285 lookup=revset.lookupfn(self),
1345 lookup=revset.lookupfn(self),
1286 localalias=localalias)
1346 localalias=localalias)
1287 else:
1347 else:
1288 m = revset.matchany(None, specs, localalias=localalias)
1348 m = revset.matchany(None, specs, localalias=localalias)
1289 return m(self)
1349 return m(self)
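For instance (illustrative), resolving a user-supplied spec while overriding one alias locally:

    revs = repo.anyrevs(['mine()'], user=True,
                        localalias={'mine': 'draft() and user("alice")'})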
1290
1350
1291 def url(self):
1351 def url(self):
1292 return 'file:' + self.root
1352 return 'file:' + self.root
1293
1353
1294 def hook(self, name, throw=False, **args):
1354 def hook(self, name, throw=False, **args):
1295 """Call a hook, passing this repo instance.
1355 """Call a hook, passing this repo instance.
1296
1356
1297 This is a convenience method to aid invoking hooks. Extensions likely
1357 This is a convenience method to aid invoking hooks. Extensions likely
1298 won't call this unless they have registered a custom hook or are
1358 won't call this unless they have registered a custom hook or are
1299 replacing code that is expected to call a hook.
1359 replacing code that is expected to call a hook.
1300 """
1360 """
1301 return hook.hook(self.ui, self, name, throw, **args)
1361 return hook.hook(self.ui, self, name, throw, **args)
1302
1362
1303 @filteredpropertycache
1363 @filteredpropertycache
1304 def _tagscache(self):
1364 def _tagscache(self):
1305 '''Returns a tagscache object that contains various tags related
1365 '''Returns a tagscache object that contains various tags related
1306 caches.'''
1366 caches.'''
1307
1367
1308 # This simplifies its cache management by having one decorated
1368 # This simplifies its cache management by having one decorated
1309 # function (this one) and the rest simply fetch things from it.
1369 # function (this one) and the rest simply fetch things from it.
1310 class tagscache(object):
1370 class tagscache(object):
1311 def __init__(self):
1371 def __init__(self):
1312 # These two define the set of tags for this repository. tags
1372 # These two define the set of tags for this repository. tags
1313 # maps tag name to node; tagtypes maps tag name to 'global' or
1373 # maps tag name to node; tagtypes maps tag name to 'global' or
1314 # 'local'. (Global tags are defined by .hgtags across all
1374 # 'local'. (Global tags are defined by .hgtags across all
1315 # heads, and local tags are defined in .hg/localtags.)
1375 # heads, and local tags are defined in .hg/localtags.)
1316 # They constitute the in-memory cache of tags.
1376 # They constitute the in-memory cache of tags.
1317 self.tags = self.tagtypes = None
1377 self.tags = self.tagtypes = None
1318
1378
1319 self.nodetagscache = self.tagslist = None
1379 self.nodetagscache = self.tagslist = None
1320
1380
1321 cache = tagscache()
1381 cache = tagscache()
1322 cache.tags, cache.tagtypes = self._findtags()
1382 cache.tags, cache.tagtypes = self._findtags()
1323
1383
1324 return cache
1384 return cache
1325
1385
1326 def tags(self):
1386 def tags(self):
1327 '''return a mapping of tag to node'''
1387 '''return a mapping of tag to node'''
1328 t = {}
1388 t = {}
1329 if self.changelog.filteredrevs:
1389 if self.changelog.filteredrevs:
1330 tags, tt = self._findtags()
1390 tags, tt = self._findtags()
1331 else:
1391 else:
1332 tags = self._tagscache.tags
1392 tags = self._tagscache.tags
1333 for k, v in tags.iteritems():
1393 for k, v in tags.iteritems():
1334 try:
1394 try:
1335 # ignore tags to unknown nodes
1395 # ignore tags to unknown nodes
1336 self.changelog.rev(v)
1396 self.changelog.rev(v)
1337 t[k] = v
1397 t[k] = v
1338 except (error.LookupError, ValueError):
1398 except (error.LookupError, ValueError):
1339 pass
1399 pass
1340 return t
1400 return t
1341
1401
1342 def _findtags(self):
1402 def _findtags(self):
1343 '''Do the hard work of finding tags. Return a pair of dicts
1403 '''Do the hard work of finding tags. Return a pair of dicts
1344 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1404 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1345 maps tag name to a string like \'global\' or \'local\'.
1405 maps tag name to a string like \'global\' or \'local\'.
1346 Subclasses or extensions are free to add their own tags, but
1406 Subclasses or extensions are free to add their own tags, but
1347 should be aware that the returned dicts will be retained for the
1407 should be aware that the returned dicts will be retained for the
1348 duration of the localrepo object.'''
1408 duration of the localrepo object.'''
1349
1409
1350 # XXX what tagtype should subclasses/extensions use? Currently
1410 # XXX what tagtype should subclasses/extensions use? Currently
1351 # mq and bookmarks add tags, but do not set the tagtype at all.
1411 # mq and bookmarks add tags, but do not set the tagtype at all.
1352 # Should each extension invent its own tag type? Should there
1412 # Should each extension invent its own tag type? Should there
1353 # be one tagtype for all such "virtual" tags? Or is the status
1413 # be one tagtype for all such "virtual" tags? Or is the status
1354 # quo fine?
1414 # quo fine?
1355
1415
1356
1416
1357 # map tag name to (node, hist)
1417 # map tag name to (node, hist)
1358 alltags = tagsmod.findglobaltags(self.ui, self)
1418 alltags = tagsmod.findglobaltags(self.ui, self)
1359 # map tag name to tag type
1419 # map tag name to tag type
1360 tagtypes = dict((tag, 'global') for tag in alltags)
1420 tagtypes = dict((tag, 'global') for tag in alltags)
1361
1421
1362 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1422 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1363
1423
1364 # Build the return dicts. Have to re-encode tag names because
1424 # Build the return dicts. Have to re-encode tag names because
1365 # the tags module always uses UTF-8 (in order not to lose info
1425 # the tags module always uses UTF-8 (in order not to lose info
1366 # writing to the cache), but the rest of Mercurial wants them in
1426 # writing to the cache), but the rest of Mercurial wants them in
1367 # local encoding.
1427 # local encoding.
1368 tags = {}
1428 tags = {}
1369 for (name, (node, hist)) in alltags.iteritems():
1429 for (name, (node, hist)) in alltags.iteritems():
1370 if node != nullid:
1430 if node != nullid:
1371 tags[encoding.tolocal(name)] = node
1431 tags[encoding.tolocal(name)] = node
1372 tags['tip'] = self.changelog.tip()
1432 tags['tip'] = self.changelog.tip()
1373 tagtypes = dict([(encoding.tolocal(name), value)
1433 tagtypes = dict([(encoding.tolocal(name), value)
1374 for (name, value) in tagtypes.iteritems()])
1434 for (name, value) in tagtypes.iteritems()])
1375 return (tags, tagtypes)
1435 return (tags, tagtypes)
1376
1436
1377 def tagtype(self, tagname):
1437 def tagtype(self, tagname):
1378 '''
1438 '''
1379 return the type of the given tag. result can be:
1439 return the type of the given tag. result can be:
1380
1440
1381 'local' : a local tag
1441 'local' : a local tag
1382 'global' : a global tag
1442 'global' : a global tag
1383 None : tag does not exist
1443 None : tag does not exist
1384 '''
1444 '''
1385
1445
1386 return self._tagscache.tagtypes.get(tagname)
1446 return self._tagscache.tagtypes.get(tagname)
1387
1447
1388 def tagslist(self):
1448 def tagslist(self):
1389 '''return a list of tags ordered by revision'''
1449 '''return a list of tags ordered by revision'''
1390 if not self._tagscache.tagslist:
1450 if not self._tagscache.tagslist:
1391 l = []
1451 l = []
1392 for t, n in self.tags().iteritems():
1452 for t, n in self.tags().iteritems():
1393 l.append((self.changelog.rev(n), t, n))
1453 l.append((self.changelog.rev(n), t, n))
1394 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1454 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1395
1455
1396 return self._tagscache.tagslist
1456 return self._tagscache.tagslist
1397
1457
1398 def nodetags(self, node):
1458 def nodetags(self, node):
1399 '''return the tags associated with a node'''
1459 '''return the tags associated with a node'''
1400 if not self._tagscache.nodetagscache:
1460 if not self._tagscache.nodetagscache:
1401 nodetagscache = {}
1461 nodetagscache = {}
1402 for t, n in self._tagscache.tags.iteritems():
1462 for t, n in self._tagscache.tags.iteritems():
1403 nodetagscache.setdefault(n, []).append(t)
1463 nodetagscache.setdefault(n, []).append(t)
1404 for tags in nodetagscache.itervalues():
1464 for tags in nodetagscache.itervalues():
1405 tags.sort()
1465 tags.sort()
1406 self._tagscache.nodetagscache = nodetagscache
1466 self._tagscache.nodetagscache = nodetagscache
1407 return self._tagscache.nodetagscache.get(node, [])
1467 return self._tagscache.nodetagscache.get(node, [])
1408
1468
1409 def nodebookmarks(self, node):
1469 def nodebookmarks(self, node):
1410 """return the list of bookmarks pointing to the specified node"""
1470 """return the list of bookmarks pointing to the specified node"""
1411 return self._bookmarks.names(node)
1471 return self._bookmarks.names(node)
1412
1472
1413 def branchmap(self):
1473 def branchmap(self):
1414 '''returns a dictionary {branch: [branchheads]} with branchheads
1474 '''returns a dictionary {branch: [branchheads]} with branchheads
1415 ordered by increasing revision number'''
1475 ordered by increasing revision number'''
1416 branchmap.updatecache(self)
1476 branchmap.updatecache(self)
1417 return self._branchcaches[self.filtername]
1477 return self._branchcaches[self.filtername]
1418
1478
1419 @unfilteredmethod
1479 @unfilteredmethod
1420 def revbranchcache(self):
1480 def revbranchcache(self):
1421 if not self._revbranchcache:
1481 if not self._revbranchcache:
1422 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1482 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1423 return self._revbranchcache
1483 return self._revbranchcache
1424
1484
1425 def branchtip(self, branch, ignoremissing=False):
1485 def branchtip(self, branch, ignoremissing=False):
1426 '''return the tip node for a given branch
1486 '''return the tip node for a given branch
1427
1487
1428 If ignoremissing is True, then this method will not raise an error.
1488 If ignoremissing is True, then this method will not raise an error.
1429 This is helpful for callers that only expect None for a missing branch
1489 This is helpful for callers that only expect None for a missing branch
1430 (e.g. namespace).
1490 (e.g. namespace).
1431
1491
1432 '''
1492 '''
1433 try:
1493 try:
1434 return self.branchmap().branchtip(branch)
1494 return self.branchmap().branchtip(branch)
1435 except KeyError:
1495 except KeyError:
1436 if not ignoremissing:
1496 if not ignoremissing:
1437 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1497 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1438 else:
1498 else:
1439 pass
1499 pass
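Illustrative behaviour of the two modes:

    node = repo.branchtip('default')                      # raises RepoLookupError if unknown
    node = repo.branchtip('no-such', ignoremissing=True)  # returns None instead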
1440
1500
1441 def lookup(self, key):
1501 def lookup(self, key):
1442 return scmutil.revsymbol(self, key).node()
1502 return scmutil.revsymbol(self, key).node()
1443
1503
1444 def lookupbranch(self, key):
1504 def lookupbranch(self, key):
1445 if key in self.branchmap():
1505 if key in self.branchmap():
1446 return key
1506 return key
1447
1507
1448 return scmutil.revsymbol(self, key).branch()
1508 return scmutil.revsymbol(self, key).branch()
1449
1509
1450 def known(self, nodes):
1510 def known(self, nodes):
1451 cl = self.changelog
1511 cl = self.changelog
1452 nm = cl.nodemap
1512 nm = cl.nodemap
1453 filtered = cl.filteredrevs
1513 filtered = cl.filteredrevs
1454 result = []
1514 result = []
1455 for n in nodes:
1515 for n in nodes:
1456 r = nm.get(n)
1516 r = nm.get(n)
1457 resp = not (r is None or r in filtered)
1517 resp = not (r is None or r in filtered)
1458 result.append(resp)
1518 result.append(resp)
1459 return result
1519 return result
1460
1520
1461 def local(self):
1521 def local(self):
1462 return self
1522 return self
1463
1523
1464 def publishing(self):
1524 def publishing(self):
1465 # it's safe (and desirable) to trust the publish flag unconditionally
1525 # it's safe (and desirable) to trust the publish flag unconditionally
1466 # so that we don't finalize changes shared between users via ssh or nfs
1526 # so that we don't finalize changes shared between users via ssh or nfs
1467 return self.ui.configbool('phases', 'publish', untrusted=True)
1527 return self.ui.configbool('phases', 'publish', untrusted=True)
1468
1528
1469 def cancopy(self):
1529 def cancopy(self):
1470 # so statichttprepo's override of local() works
1530 # so statichttprepo's override of local() works
1471 if not self.local():
1531 if not self.local():
1472 return False
1532 return False
1473 if not self.publishing():
1533 if not self.publishing():
1474 return True
1534 return True
1475 # if publishing we can't copy if there is filtered content
1535 # if publishing we can't copy if there is filtered content
1476 return not self.filtered('visible').changelog.filteredrevs
1536 return not self.filtered('visible').changelog.filteredrevs
1477
1537
1478 def shared(self):
1538 def shared(self):
1479 '''the type of shared repository (None if not shared)'''
1539 '''the type of shared repository (None if not shared)'''
1480 if self.sharedpath != self.path:
1540 if self.sharedpath != self.path:
1481 return 'store'
1541 return 'store'
1482 return None
1542 return None
1483
1543
1484 def wjoin(self, f, *insidef):
1544 def wjoin(self, f, *insidef):
1485 return self.vfs.reljoin(self.root, f, *insidef)
1545 return self.vfs.reljoin(self.root, f, *insidef)
1486
1546
1487 def setparents(self, p1, p2=nullid):
1547 def setparents(self, p1, p2=nullid):
1488 with self.dirstate.parentchange():
1548 with self.dirstate.parentchange():
1489 copies = self.dirstate.setparents(p1, p2)
1549 copies = self.dirstate.setparents(p1, p2)
1490 pctx = self[p1]
1550 pctx = self[p1]
1491 if copies:
1551 if copies:
1492 # Adjust copy records, the dirstate cannot do it, it
1552 # Adjust copy records, the dirstate cannot do it, it
1493 # requires access to parents manifests. Preserve them
1553 # requires access to parents manifests. Preserve them
1494 # only for entries added to first parent.
1554 # only for entries added to first parent.
1495 for f in copies:
1555 for f in copies:
1496 if f not in pctx and copies[f] in pctx:
1556 if f not in pctx and copies[f] in pctx:
1497 self.dirstate.copy(copies[f], f)
1557 self.dirstate.copy(copies[f], f)
1498 if p2 == nullid:
1558 if p2 == nullid:
1499 for f, s in sorted(self.dirstate.copies().items()):
1559 for f, s in sorted(self.dirstate.copies().items()):
1500 if f not in pctx and s not in pctx:
1560 if f not in pctx and s not in pctx:
1501 self.dirstate.copy(None, f)
1561 self.dirstate.copy(None, f)
1502
1562
1503 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1563 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1504 """changeid can be a changeset revision, node, or tag.
1564 """changeid can be a changeset revision, node, or tag.
1505 fileid can be a file revision or node."""
1565 fileid can be a file revision or node."""
1506 return context.filectx(self, path, changeid, fileid,
1566 return context.filectx(self, path, changeid, fileid,
1507 changectx=changectx)
1567 changectx=changectx)
1508
1568
1509 def getcwd(self):
1569 def getcwd(self):
1510 return self.dirstate.getcwd()
1570 return self.dirstate.getcwd()
1511
1571
1512 def pathto(self, f, cwd=None):
1572 def pathto(self, f, cwd=None):
1513 return self.dirstate.pathto(f, cwd)
1573 return self.dirstate.pathto(f, cwd)
1514
1574
1515 def _loadfilter(self, filter):
1575 def _loadfilter(self, filter):
1516 if filter not in self._filterpats:
1576 if filter not in self._filterpats:
1517 l = []
1577 l = []
1518 for pat, cmd in self.ui.configitems(filter):
1578 for pat, cmd in self.ui.configitems(filter):
1519 if cmd == '!':
1579 if cmd == '!':
1520 continue
1580 continue
1521 mf = matchmod.match(self.root, '', [pat])
1581 mf = matchmod.match(self.root, '', [pat])
1522 fn = None
1582 fn = None
1523 params = cmd
1583 params = cmd
1524 for name, filterfn in self._datafilters.iteritems():
1584 for name, filterfn in self._datafilters.iteritems():
1525 if cmd.startswith(name):
1585 if cmd.startswith(name):
1526 fn = filterfn
1586 fn = filterfn
1527 params = cmd[len(name):].lstrip()
1587 params = cmd[len(name):].lstrip()
1528 break
1588 break
1529 if not fn:
1589 if not fn:
1530 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1590 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1531 # Wrap old filters not supporting keyword arguments
1591 # Wrap old filters not supporting keyword arguments
1532 if not pycompat.getargspec(fn)[2]:
1592 if not pycompat.getargspec(fn)[2]:
1533 oldfn = fn
1593 oldfn = fn
1534 fn = lambda s, c, **kwargs: oldfn(s, c)
1594 fn = lambda s, c, **kwargs: oldfn(s, c)
1535 l.append((mf, fn, params))
1595 l.append((mf, fn, params))
1536 self._filterpats[filter] = l
1596 self._filterpats[filter] = l
1537 return self._filterpats[filter]
1597 return self._filterpats[filter]
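The patterns iterated above come from hgrc sections named after the filter ('encode' or 'decode' for the property caches below); a command of '!' disables an inherited entry. A sketch of such a configuration (illustrative; the commands are placeholders):

    [encode]
    *.txt = dos2unix

    [decode]
    *.txt = unix2dos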
1538
1598
1539 def _filter(self, filterpats, filename, data):
1599 def _filter(self, filterpats, filename, data):
1540 for mf, fn, cmd in filterpats:
1600 for mf, fn, cmd in filterpats:
1541 if mf(filename):
1601 if mf(filename):
1542 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1602 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1543 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1603 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1544 break
1604 break
1545
1605
1546 return data
1606 return data
1547
1607
1548 @unfilteredpropertycache
1608 @unfilteredpropertycache
1549 def _encodefilterpats(self):
1609 def _encodefilterpats(self):
1550 return self._loadfilter('encode')
1610 return self._loadfilter('encode')
1551
1611
1552 @unfilteredpropertycache
1612 @unfilteredpropertycache
1553 def _decodefilterpats(self):
1613 def _decodefilterpats(self):
1554 return self._loadfilter('decode')
1614 return self._loadfilter('decode')
1555
1615
1556 def adddatafilter(self, name, filter):
1616 def adddatafilter(self, name, filter):
1557 self._datafilters[name] = filter
1617 self._datafilters[name] = filter
1558
1618
1559 def wread(self, filename):
1619 def wread(self, filename):
1560 if self.wvfs.islink(filename):
1620 if self.wvfs.islink(filename):
1561 data = self.wvfs.readlink(filename)
1621 data = self.wvfs.readlink(filename)
1562 else:
1622 else:
1563 data = self.wvfs.read(filename)
1623 data = self.wvfs.read(filename)
1564 return self._filter(self._encodefilterpats, filename, data)
1624 return self._filter(self._encodefilterpats, filename, data)
1565
1625
1566 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1626 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1567 """write ``data`` into ``filename`` in the working directory
1627 """write ``data`` into ``filename`` in the working directory
1568
1628
1569 This returns the length of the written (maybe decoded) data.
1629 This returns the length of the written (maybe decoded) data.
1570 """
1630 """
1571 data = self._filter(self._decodefilterpats, filename, data)
1631 data = self._filter(self._decodefilterpats, filename, data)
1572 if 'l' in flags:
1632 if 'l' in flags:
1573 self.wvfs.symlink(data, filename)
1633 self.wvfs.symlink(data, filename)
1574 else:
1634 else:
1575 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1635 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1576 **kwargs)
1636 **kwargs)
1577 if 'x' in flags:
1637 if 'x' in flags:
1578 self.wvfs.setflags(filename, False, True)
1638 self.wvfs.setflags(filename, False, True)
1579 else:
1639 else:
1580 self.wvfs.setflags(filename, False, False)
1640 self.wvfs.setflags(filename, False, False)
1581 return len(data)
1641 return len(data)
1582
1642
1583 def wwritedata(self, filename, data):
1643 def wwritedata(self, filename, data):
1584 return self._filter(self._decodefilterpats, filename, data)
1644 return self._filter(self._decodefilterpats, filename, data)
1585
1645
1586 def currenttransaction(self):
1646 def currenttransaction(self):
1587 """return the current transaction or None if none exists"""
1647 """return the current transaction or None if none exists"""
1588 if self._transref:
1648 if self._transref:
1589 tr = self._transref()
1649 tr = self._transref()
1590 else:
1650 else:
1591 tr = None
1651 tr = None
1592
1652
1593 if tr and tr.running():
1653 if tr and tr.running():
1594 return tr
1654 return tr
1595 return None
1655 return None
1596
1656
1597 def transaction(self, desc, report=None):
1657 def transaction(self, desc, report=None):
1598 if (self.ui.configbool('devel', 'all-warnings')
1658 if (self.ui.configbool('devel', 'all-warnings')
1599 or self.ui.configbool('devel', 'check-locks')):
1659 or self.ui.configbool('devel', 'check-locks')):
1600 if self._currentlock(self._lockref) is None:
1660 if self._currentlock(self._lockref) is None:
1601 raise error.ProgrammingError('transaction requires locking')
1661 raise error.ProgrammingError('transaction requires locking')
1602 tr = self.currenttransaction()
1662 tr = self.currenttransaction()
1603 if tr is not None:
1663 if tr is not None:
1604 return tr.nest(name=desc)
1664 return tr.nest(name=desc)
1605
1665
1606 # abort here if the journal already exists
1666 # abort here if the journal already exists
1607 if self.svfs.exists("journal"):
1667 if self.svfs.exists("journal"):
1608 raise error.RepoError(
1668 raise error.RepoError(
1609 _("abandoned transaction found"),
1669 _("abandoned transaction found"),
1610 hint=_("run 'hg recover' to clean up transaction"))
1670 hint=_("run 'hg recover' to clean up transaction"))
1611
1671
1612 idbase = "%.40f#%f" % (random.random(), time.time())
1672 idbase = "%.40f#%f" % (random.random(), time.time())
1613 ha = hex(hashlib.sha1(idbase).digest())
1673 ha = hex(hashlib.sha1(idbase).digest())
1614 txnid = 'TXN:' + ha
1674 txnid = 'TXN:' + ha
1615 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1675 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1616
1676
1617 self._writejournal(desc)
1677 self._writejournal(desc)
1618 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1678 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1619 if report:
1679 if report:
1620 rp = report
1680 rp = report
1621 else:
1681 else:
1622 rp = self.ui.warn
1682 rp = self.ui.warn
1623 vfsmap = {'plain': self.vfs} # root of .hg/
1683 vfsmap = {'plain': self.vfs} # root of .hg/
1624 # we must avoid cyclic reference between repo and transaction.
1684 # we must avoid cyclic reference between repo and transaction.
1625 reporef = weakref.ref(self)
1685 reporef = weakref.ref(self)
1626 # Code to track tag movement
1686 # Code to track tag movement
1627 #
1687 #
1628 # Since tags are all handled as file content, it is actually quite hard
1688 # Since tags are all handled as file content, it is actually quite hard
1629 # to track these movements from a code perspective. So we fall back to
1689 # to track these movements from a code perspective. So we fall back to
1630 # tracking at the repository level. One could envision tracking changes
1690 # tracking at the repository level. One could envision tracking changes
1631 # to the '.hgtags' file through changegroup apply but that fails to
1691 # to the '.hgtags' file through changegroup apply but that fails to
1632 # cope with cases where a transaction exposes new heads without a
1692 # cope with cases where a transaction exposes new heads without a
1633 # changegroup being involved (e.g. phase movement).
1693 # changegroup being involved (e.g. phase movement).
1634 #
1694 #
1635 # For now, we gate the feature behind a flag since this likely comes
1695 # For now, we gate the feature behind a flag since this likely comes
1636 # with performance impacts. The current code runs more often than needed
1696 # with performance impacts. The current code runs more often than needed
1637 # and does not use caches as much as it could. The current focus is on
1697 # and does not use caches as much as it could. The current focus is on
1638 # the behavior of the feature so we disable it by default. The flag
1698 # the behavior of the feature so we disable it by default. The flag
1639 # will be removed when we are happy with the performance impact.
1699 # will be removed when we are happy with the performance impact.
1640 #
1700 #
1641 # Once this feature is no longer experimental, move the following
1701 # Once this feature is no longer experimental, move the following
1642 # documentation to the appropriate help section:
1702 # documentation to the appropriate help section:
1643 #
1703 #
1644 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1704 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1645 # tags (new or changed or deleted tags). In addition the details of
1705 # tags (new or changed or deleted tags). In addition the details of
1646 # these changes are made available in a file at:
1706 # these changes are made available in a file at:
1647 # ``REPOROOT/.hg/changes/tags.changes``.
1707 # ``REPOROOT/.hg/changes/tags.changes``.
1648 # Make sure you check for HG_TAG_MOVED before reading that file as it
1708 # Make sure you check for HG_TAG_MOVED before reading that file as it
1649 # might exist from a previous transaction even if no tag were touched
1709 # might exist from a previous transaction even if no tag were touched
1650 # in this one. Changes are recorded in a line base format::
1710 # in this one. Changes are recorded in a line base format::
1651 #
1711 #
1652 # <action> <hex-node> <tag-name>\n
1712 # <action> <hex-node> <tag-name>\n
1653 #
1713 #
1654 # Actions are defined as follows:
1714 # Actions are defined as follows:
1655 # "-R": tag is removed,
1715 # "-R": tag is removed,
1656 # "+A": tag is added,
1716 # "+A": tag is added,
1657 # "-M": tag is moved (old value),
1717 # "-M": tag is moved (old value),
1658 # "+M": tag is moved (new value),
1718 # "+M": tag is moved (new value),
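    # Illustrative (not part of this change): an external txnclose hook could
    # consume the file documented above roughly like this, guarded by the
    # HG_TAG_MOVED variable:
    #
    #   if os.environ.get('HG_TAG_MOVED'):
    #       with open('.hg/changes/tags.changes', 'rb') as fp:
    #           for line in fp:
    #               action, node, name = line.rstrip('\n').split(' ', 2)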
1659 tracktags = lambda x: None
1719 tracktags = lambda x: None
1660 # experimental config: experimental.hook-track-tags
1720 # experimental config: experimental.hook-track-tags
1661 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1721 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1662 if desc != 'strip' and shouldtracktags:
1722 if desc != 'strip' and shouldtracktags:
1663 oldheads = self.changelog.headrevs()
1723 oldheads = self.changelog.headrevs()
1664 def tracktags(tr2):
1724 def tracktags(tr2):
1665 repo = reporef()
1725 repo = reporef()
1666 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1726 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1667 newheads = repo.changelog.headrevs()
1727 newheads = repo.changelog.headrevs()
1668 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1728 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1669 # note: we compare lists here.
1729 # note: we compare lists here.
1670 # As we do it only once, building a set would not be cheaper
1730 # As we do it only once, building a set would not be cheaper
1671 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1731 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1672 if changes:
1732 if changes:
1673 tr2.hookargs['tag_moved'] = '1'
1733 tr2.hookargs['tag_moved'] = '1'
1674 with repo.vfs('changes/tags.changes', 'w',
1734 with repo.vfs('changes/tags.changes', 'w',
1675 atomictemp=True) as changesfile:
1735 atomictemp=True) as changesfile:
1676 # note: we do not register the file to the transaction
1736 # note: we do not register the file to the transaction
1677 # because we needs it to still exist on the transaction
1737 # because we needs it to still exist on the transaction
1678 # is close (for txnclose hooks)
1738 # is close (for txnclose hooks)
1679 tagsmod.writediff(changesfile, changes)
1739 tagsmod.writediff(changesfile, changes)
1680 def validate(tr2):
1740 def validate(tr2):
1681 """will run pre-closing hooks"""
1741 """will run pre-closing hooks"""
1682 # XXX the transaction API is a bit lacking here so we take a hacky
1742 # XXX the transaction API is a bit lacking here so we take a hacky
1683 # path for now
1743 # path for now
1684 #
1744 #
1685 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1745 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1686 # dict is copied before these run. In addition we need the data
1746 # dict is copied before these run. In addition we need the data
1687 # available to in-memory hooks too.
1747 # available to in-memory hooks too.
1688 #
1748 #
1689 # Moreover, we also need to make sure this runs before txnclose
1749 # Moreover, we also need to make sure this runs before txnclose
1690 # hooks and there is no "pending" mechanism that would execute
1750 # hooks and there is no "pending" mechanism that would execute
1691 # logic only if hooks are about to run.
1751 # logic only if hooks are about to run.
1692 #
1752 #
1693 # Fixing this limitation of the transaction is also needed to track
1753 # Fixing this limitation of the transaction is also needed to track
1694 # other families of changes (bookmarks, phases, obsolescence).
1754 # other families of changes (bookmarks, phases, obsolescence).
1695 #
1755 #
1696 # This will have to be fixed before we remove the experimental
1756 # This will have to be fixed before we remove the experimental
1697 # gating.
1757 # gating.
1698 tracktags(tr2)
1758 tracktags(tr2)
1699 repo = reporef()
1759 repo = reporef()
1700 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1760 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1701 scmutil.enforcesinglehead(repo, tr2, desc)
1761 scmutil.enforcesinglehead(repo, tr2, desc)
1702 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1762 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1703 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1763 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1704 args = tr.hookargs.copy()
1764 args = tr.hookargs.copy()
1705 args.update(bookmarks.preparehookargs(name, old, new))
1765 args.update(bookmarks.preparehookargs(name, old, new))
1706 repo.hook('pretxnclose-bookmark', throw=True,
1766 repo.hook('pretxnclose-bookmark', throw=True,
1707 txnname=desc,
1767 txnname=desc,
1708 **pycompat.strkwargs(args))
1768 **pycompat.strkwargs(args))
1709 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1769 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1710 cl = repo.unfiltered().changelog
1770 cl = repo.unfiltered().changelog
1711 for rev, (old, new) in tr.changes['phases'].items():
1771 for rev, (old, new) in tr.changes['phases'].items():
1712 args = tr.hookargs.copy()
1772 args = tr.hookargs.copy()
1713 node = hex(cl.node(rev))
1773 node = hex(cl.node(rev))
1714 args.update(phases.preparehookargs(node, old, new))
1774 args.update(phases.preparehookargs(node, old, new))
1715 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1775 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1716 **pycompat.strkwargs(args))
1776 **pycompat.strkwargs(args))
1717
1777
1718 repo.hook('pretxnclose', throw=True,
1778 repo.hook('pretxnclose', throw=True,
1719 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1779 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1720 def releasefn(tr, success):
1780 def releasefn(tr, success):
1721 repo = reporef()
1781 repo = reporef()
1722 if success:
1782 if success:
1723 # this should be explicitly invoked here, because
1783 # this should be explicitly invoked here, because
1724 # in-memory changes aren't written out when closing the
1784 # in-memory changes aren't written out when closing the
1725 # transaction, if tr.addfilegenerator (via
1785 # transaction, if tr.addfilegenerator (via
1726 # dirstate.write or so) isn't invoked while the
1786 # dirstate.write or so) isn't invoked while the
1727 # transaction is running
1787 # transaction is running
1728 repo.dirstate.write(None)
1788 repo.dirstate.write(None)
1729 else:
1789 else:
1730 # discard all changes (including ones already written
1790 # discard all changes (including ones already written
1731 # out) in this transaction
1791 # out) in this transaction
1732 narrowspec.restorebackup(self, 'journal.narrowspec')
1792 narrowspec.restorebackup(self, 'journal.narrowspec')
1733 repo.dirstate.restorebackup(None, 'journal.dirstate')
1793 repo.dirstate.restorebackup(None, 'journal.dirstate')
1734
1794
1735 repo.invalidate(clearfilecache=True)
1795 repo.invalidate(clearfilecache=True)
1736
1796
1737 tr = transaction.transaction(rp, self.svfs, vfsmap,
1797 tr = transaction.transaction(rp, self.svfs, vfsmap,
1738 "journal",
1798 "journal",
1739 "undo",
1799 "undo",
1740 aftertrans(renames),
1800 aftertrans(renames),
1741 self.store.createmode,
1801 self.store.createmode,
1742 validator=validate,
1802 validator=validate,
1743 releasefn=releasefn,
1803 releasefn=releasefn,
1744 checkambigfiles=_cachedfiles,
1804 checkambigfiles=_cachedfiles,
1745 name=desc)
1805 name=desc)
1746 tr.changes['origrepolen'] = len(self)
1806 tr.changes['origrepolen'] = len(self)
1747 tr.changes['obsmarkers'] = set()
1807 tr.changes['obsmarkers'] = set()
1748 tr.changes['phases'] = {}
1808 tr.changes['phases'] = {}
1749 tr.changes['bookmarks'] = {}
1809 tr.changes['bookmarks'] = {}
1750
1810
1751 tr.hookargs['txnid'] = txnid
1811 tr.hookargs['txnid'] = txnid
1752 # note: writing the fncache only during finalize means that the file is
1812 # note: writing the fncache only during finalize means that the file is
1753 # outdated when running hooks. As fncache is used for streaming clone,
1813 # outdated when running hooks. As fncache is used for streaming clone,
1754 # this is not expected to break anything that happens during the hooks.
1814 # this is not expected to break anything that happens during the hooks.
1755 tr.addfinalize('flush-fncache', self.store.write)
1815 tr.addfinalize('flush-fncache', self.store.write)
1756 def txnclosehook(tr2):
1816 def txnclosehook(tr2):
1757 """To be run if transaction is successful, will schedule a hook run
1817 """To be run if transaction is successful, will schedule a hook run
1758 """
1818 """
1759 # Don't reference tr2 in hook() so we don't hold a reference.
1819 # Don't reference tr2 in hook() so we don't hold a reference.
1760 # This reduces memory consumption when there are multiple
1820 # This reduces memory consumption when there are multiple
1761 # transactions per lock. This can likely go away if issue5045
1821 # transactions per lock. This can likely go away if issue5045
1762 # fixes the function accumulation.
1822 # fixes the function accumulation.
1763 hookargs = tr2.hookargs
1823 hookargs = tr2.hookargs
1764
1824
1765 def hookfunc():
1825 def hookfunc():
1766 repo = reporef()
1826 repo = reporef()
1767 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1827 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1768 bmchanges = sorted(tr.changes['bookmarks'].items())
1828 bmchanges = sorted(tr.changes['bookmarks'].items())
1769 for name, (old, new) in bmchanges:
1829 for name, (old, new) in bmchanges:
1770 args = tr.hookargs.copy()
1830 args = tr.hookargs.copy()
1771 args.update(bookmarks.preparehookargs(name, old, new))
1831 args.update(bookmarks.preparehookargs(name, old, new))
1772 repo.hook('txnclose-bookmark', throw=False,
1832 repo.hook('txnclose-bookmark', throw=False,
1773 txnname=desc, **pycompat.strkwargs(args))
1833 txnname=desc, **pycompat.strkwargs(args))
1774
1834
1775 if hook.hashook(repo.ui, 'txnclose-phase'):
1835 if hook.hashook(repo.ui, 'txnclose-phase'):
1776 cl = repo.unfiltered().changelog
1836 cl = repo.unfiltered().changelog
1777 phasemv = sorted(tr.changes['phases'].items())
1837 phasemv = sorted(tr.changes['phases'].items())
1778 for rev, (old, new) in phasemv:
1838 for rev, (old, new) in phasemv:
1779 args = tr.hookargs.copy()
1839 args = tr.hookargs.copy()
1780 node = hex(cl.node(rev))
1840 node = hex(cl.node(rev))
1781 args.update(phases.preparehookargs(node, old, new))
1841 args.update(phases.preparehookargs(node, old, new))
1782 repo.hook('txnclose-phase', throw=False, txnname=desc,
1842 repo.hook('txnclose-phase', throw=False, txnname=desc,
1783 **pycompat.strkwargs(args))
1843 **pycompat.strkwargs(args))
1784
1844
1785 repo.hook('txnclose', throw=False, txnname=desc,
1845 repo.hook('txnclose', throw=False, txnname=desc,
1786 **pycompat.strkwargs(hookargs))
1846 **pycompat.strkwargs(hookargs))
1787 reporef()._afterlock(hookfunc)
1847 reporef()._afterlock(hookfunc)
1788 tr.addfinalize('txnclose-hook', txnclosehook)
1848 tr.addfinalize('txnclose-hook', txnclosehook)
1789 # Include a leading "-" to make it happen before the transaction summary
1849 # Include a leading "-" to make it happen before the transaction summary
1790 # reports registered via scmutil.registersummarycallback() whose names
1850 # reports registered via scmutil.registersummarycallback() whose names
1791 # are 00-txnreport etc. That way, the caches will be warm when the
1851 # are 00-txnreport etc. That way, the caches will be warm when the
1792 # callbacks run.
1852 # callbacks run.
1793 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1853 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1794 def txnaborthook(tr2):
1854 def txnaborthook(tr2):
1795 """To be run if transaction is aborted
1855 """To be run if transaction is aborted
1796 """
1856 """
1797 reporef().hook('txnabort', throw=False, txnname=desc,
1857 reporef().hook('txnabort', throw=False, txnname=desc,
1798 **pycompat.strkwargs(tr2.hookargs))
1858 **pycompat.strkwargs(tr2.hookargs))
1799 tr.addabort('txnabort-hook', txnaborthook)
1859 tr.addabort('txnabort-hook', txnaborthook)
1800 # avoid eager cache invalidation. in-memory data should be identical
1860 # avoid eager cache invalidation. in-memory data should be identical
1801 # to stored data if transaction has no error.
1861 # to stored data if transaction has no error.
1802 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1862 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1803 self._transref = weakref.ref(tr)
1863 self._transref = weakref.ref(tr)
1804 scmutil.registersummarycallback(self, tr, desc)
1864 scmutil.registersummarycallback(self, tr, desc)
1805 return tr
1865 return tr
1806
1866
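    # A sketch of how the 'txnclose-phase' hook scheduled above might be
    # consumed from a configuration file; the hook name suffix and the shell
    # command are hypothetical:
    #
    #   [hooks]
    #   txnclose-phase.log = echo "$HG_NODE is now in phase $HG_PHASE"
    #
    # phases.preparehookargs() is what feeds the node and the old/new phase
    # into the hook environment, one invocation per recorded phase movement.
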
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

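    # Illustrative sketch: undofiles() maps each journal file to its
    # post-transaction counterpart via undoname(), which (assuming the
    # 'journal' -> 'undo' renaming used by this module) yields pairs such as:
    #
    #   'journal'           -> 'undo'
    #   'journal.dirstate'  -> 'undo.dirstate'
    #   'journal.bookmarks' -> 'undo.bookmarks'
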
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

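    # recover() is the engine behind 'hg recover'; a sketch of the two
    # possible sessions, matching the messages emitted above:
    #
    #   $ hg recover
    #   rolling back interrupted transaction
    #
    #   $ hg recover
    #   no interrupted transaction available
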
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

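    # _rollback() starts by parsing 'undo.desc', which _writejournal() above
    # wrote as "%d\n%s\n" % (len(self), desc). A sketch of its contents for
    # a repository that had 42 changesets before a 'commit' transaction:
    #
    #   42
    #   commit
    #
    # yielding oldtip = 41 and the message
    # "repository tip rolled back to revision 41 (undo commit)".
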
    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

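    # The weakref dance above, reduced to a minimal standalone sketch; the
    # class and names are hypothetical:
    #
    #   import weakref
    #
    #   class Repo(object):
    #       pass
    #
    #   repo = Repo()
    #   reporef = weakref.ref(repo)   # closure keeps no strong reference
    #
    #   def updater(tr):
    #       repo = reporef()          # may be None once repo is collected
    #       if repo is not None:
    #           pass                  # safe to use repo here
    #
    # Capturing only the weak reference keeps the transaction from holding
    # the repository alive, avoiding the repo <-> transaction cycle.
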
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

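    # A hedged sketch of warming everything from an extension or debug
    # command, under the locks the cache code expects (the surrounding
    # command plumbing is assumed):
    #
    #   with repo.wlock(), repo.lock():
    #       repo.updatecaches(full=True)
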
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

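    # The two timeouts read above come straight from the user's
    # configuration; a sketch of the corresponding hgrc section (values are
    # illustrative, in seconds):
    #
    #   [ui]
    #   timeout = 600       # give up waiting for a lock after ten minutes
    #   timeout.warn = 10   # print a warning once waiting exceeds this
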
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

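    # _afterlock() leans on Python's for/else: the else clause runs only if
    # the loop finished without hitting 'break'. The same control flow in a
    # minimal standalone sketch:
    #
    #   locks = [None, None]          # stand-ins for the wlock and lock refs
    #   for l in locks:
    #       if l is not None and l.held:
    #           l.postrelease.append(callback)
    #           break
    #   else:                         # no held lock found: run it now
    #       callback()
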
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

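    # A sketch of the documented acquisition order, using the locks as
    # context managers the way callers elsewhere in Mercurial do:
    #
    #   with repo.wlock():            # working-directory lock first...
    #       with repo.lock():         # ...then the store lock
    #           with repo.transaction('example') as tr:
    #               pass              # mutate the store here
    #
    # Acquiring them in the opposite order is the deadlock hazard the
    # docstrings above warn about, and is what the devel warning in wlock()
    # below checks for.
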
    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

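    # The rename branch above boils down to storing a tiny metadata
    # dictionary with the new file revision; a sketch for a file 'bar'
    # recorded as a copy of 'foo' (paths illustrative):
    #
    #   meta = {
    #       'copy': 'foo',            # source path of the copy
    #       'copyrev': hex(crev),     # filelog node of the source revision
    #   }
    #
    # with fparent1 forced to nullid, which is the "look up the copy data"
    # signal described in the comment above.
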
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

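    # A hedged sketch of driving commit() from an extension command ('ui'
    # and 'repo' assumed to be supplied by the command infrastructure; the
    # message and user are illustrative):
    #
    #   node = repo.commit(text='example change', user='alice')
    #   if node is None:
    #       ui.status('nothing changed\n')
    #
    # commit() returns None when there is nothing to commit and
    # ui.allowemptycommit is unset, per the allowemptycommit guard above.
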
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent changeset;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

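    # A sketch of an extension registering a post-dirstate-status callback
    # under the contract documented above (the callback body is
    # hypothetical):
    #
    #   def poststatus(wctx, status):
    #       repo = wctx.repo()        # reach the dirstate via wctx.repo()
    #       repo.ui.debug('%d files modified\n' % len(status.modified))
    #
    #   repo.addpostdsstatus(poststatus)
    #
    # It must be re-registered before each status run, since the list is
    # cleared afterwards.
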
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

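    # For example, to find the still-open heads of the "default" branch that
    # descend from a given node (hypothetical ``startnode``), one could call:
    #
    #   heads = repo.branchheads('default', start=startnode, closed=False)
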
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

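    # Clarifying note (not in the original source): for each (top, bottom)
    # pair, the loop above walks first parents from top and samples the
    # nodes at offsets 1, 2, 4, 8, ... before bottom is reached, i.e. at
    # exponentially growing distances. This sampling is what the legacy
    # wire protocol's "between" command historically relied on.
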
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (providing repo, remote, and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

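    # Illustrative sketch (not part of this module): pushing a bookmark
    # through the generic pushkey mechanism looks roughly like this;
    # ``newnode`` is a hypothetical node.
    #
    #   ok = repo.pushkey('bookmarks', 'feature-branch', '', hex(newnode))
    #
    # A 'prepushkey' hook that aborts makes this return False instead of
    # raising.
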
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

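# Illustrative sketch (not part of this module): the transaction machinery
# passes the closure returned by aftertrans() as a post-close callback, so
# journal files become undo files once the transaction commits. The file
# names and keyword below are indicative only, not the exact call site:
#
#   renames = [(vfs, 'journal', 'undo'),
#              (vfs, 'journal.dirstate', 'undo.dirstate')]
#   tr = transaction.transaction(ui.warn, vfs, vfsmap, 'journal',
#                                after=aftertrans(renames))
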
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

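# For example, undoname('.hg/store/journal') returns '.hg/store/undo', and
# undoname('.hg/store/journal.phaseroots') returns
# '.hg/store/undo.phaseroots'.
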
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements

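# Illustrative sketch (not part of this module): an extension can wrap
# newreporequirements() to add a requirement of its own. The requirement
# name 'exp-myfeature' and the wrapper names are hypothetical; the
# extsetup/wrapfunction pattern follows the usual extension conventions.
#
#   from mercurial import extensions, localrepo
#
#   def _wrapreqs(orig, ui, createopts=None):
#       requirements = orig(ui, createopts=createopts)
#       requirements.add('exp-myfeature')
#       return requirements
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements', _wrapreqs)
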
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
    }

    return {k: v for k, v in createopts.items() if k not in known}

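# Illustrative sketch (not part of this module): an extension that
# understands an extra creation option (hypothetical 'myopt') would claim
# it by filtering it out before delegating to the original function:
#
#   def _filtercreateopts(orig, ui, createopts):
#       createopts = dict(createopts)
#       createopts.pop('myopt', None)  # handled by this extension
#       return orig(ui, createopts)
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                           _filtercreateopts)
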
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    narrowfiles
        Set up repository to support narrow file storage.
    sharedrepo
        Repository object from which storage should be shared.
    sharedrelative
        Boolean indicating if the path to the shared repo should be
        stored as relative. By default, the pointer to the "parent" repo
        is stored as an absolute path.
    shareditems
        Set of items to share to the new repository (in addition to storage).
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

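# Illustrative sketch (not part of this module): creating a repository
# directly, e.g. from a script or extension; the path is hypothetical.
#
#   createrepository(ui, b'/path/to/newrepo',
#                    createopts={'narrowfiles': True})
#
# This writes .hg/, the requires file, and the dummy 00changelog.i, but does
# not return a repo object; open the result via instance() or hg.repository().
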
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
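
# Behavior sketch derived from the class above: after poisoning, only
# close() remains callable; everything else raises.
#
#   poisonrepository(repo)
#   repo.close()       # still allowed (no-op)
#   repo.changelog     # raises error.ProgrammingError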