manifest: move manifest creation to a helper function...
Author: Durham Goode
Changeset: r30218:1767723f (stable)
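The visible hunk below shows what the commit title means: bundlerepo's cached manifest property is replaced by a plain _constructmanifest() helper, so the base repository class can own the cached property and subclasses only override how the manifest object gets built. A minimal sketch of that pattern, assuming illustrative class names and a plain-attribute cache instead of Mercurial's propertycache descriptor:

class baserepo(object):
    # stands in for localrepository: owns the cached 'manifest' property
    def _constructmanifest(self):
        # default construction; subclasses override only this hook
        return 'manifest built from the local store'

    @property
    def manifest(self):
        # compute once, then reuse (Mercurial caches via a descriptor;
        # a plain attribute keeps this sketch short)
        if not hasattr(self, '_manifest'):
            self._manifest = self._constructmanifest()
        return self._manifest

class bundlerepo(baserepo):
    # stands in for bundlerepository: swaps construction, keeps caching
    def _constructmanifest(self):
        return 'manifest built from a bundle file'

print(baserepo().manifest)    # manifest built from the local store
print(bundlerepo().manifest)  # manifest built from a bundle file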
@@ -1,554 +1,553 @@
1 # bundlerepo.py - repository class for viewing uncompressed bundles
1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 #
2 #
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Repository class for viewing uncompressed bundles.
8 """Repository class for viewing uncompressed bundles.
9
9
10 This provides a read-only repository interface to bundles as if they
10 This provides a read-only repository interface to bundles as if they
11 were part of the actual repository.
11 were part of the actual repository.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import os
16 import os
17 import shutil
17 import shutil
18 import tempfile
18 import tempfile
19
19
20 from .i18n import _
20 from .i18n import _
21 from .node import nullid
21 from .node import nullid
22
22
23 from . import (
23 from . import (
24 bundle2,
24 bundle2,
25 changegroup,
25 changegroup,
26 changelog,
26 changelog,
27 cmdutil,
27 cmdutil,
28 discovery,
28 discovery,
29 error,
29 error,
30 exchange,
30 exchange,
31 filelog,
31 filelog,
32 localrepo,
32 localrepo,
33 manifest,
33 manifest,
34 mdiff,
34 mdiff,
35 node as nodemod,
35 node as nodemod,
36 pathutil,
36 pathutil,
37 phases,
37 phases,
38 revlog,
38 revlog,
39 scmutil,
39 scmutil,
40 util,
40 util,
41 )
41 )
42
42
43 class bundlerevlog(revlog.revlog):
43 class bundlerevlog(revlog.revlog):
44 def __init__(self, opener, indexfile, bundle, linkmapper):
44 def __init__(self, opener, indexfile, bundle, linkmapper):
45 # How it works:
45 # How it works:
46 # To retrieve a revision, we need to know the offset of the revision in
46 # To retrieve a revision, we need to know the offset of the revision in
47 # the bundle (an unbundle object). We store this offset in the index
47 # the bundle (an unbundle object). We store this offset in the index
48 # (start). The base of the delta is stored in the base field.
48 # (start). The base of the delta is stored in the base field.
49 #
49 #
50 # To differentiate a rev in the bundle from a rev in the revlog, we
50 # To differentiate a rev in the bundle from a rev in the revlog, we
51 # check revision against repotiprev.
51 # check revision against repotiprev.
52 opener = scmutil.readonlyvfs(opener)
52 opener = scmutil.readonlyvfs(opener)
53 revlog.revlog.__init__(self, opener, indexfile)
53 revlog.revlog.__init__(self, opener, indexfile)
54 self.bundle = bundle
54 self.bundle = bundle
55 n = len(self)
55 n = len(self)
56 self.repotiprev = n - 1
56 self.repotiprev = n - 1
57 chain = None
57 chain = None
58 self.bundlerevs = set() # used by 'bundle()' revset expression
58 self.bundlerevs = set() # used by 'bundle()' revset expression
59 getchunk = lambda: bundle.deltachunk(chain)
59 getchunk = lambda: bundle.deltachunk(chain)
60 for chunkdata in iter(getchunk, {}):
60 for chunkdata in iter(getchunk, {}):
61 node = chunkdata['node']
61 node = chunkdata['node']
62 p1 = chunkdata['p1']
62 p1 = chunkdata['p1']
63 p2 = chunkdata['p2']
63 p2 = chunkdata['p2']
64 cs = chunkdata['cs']
64 cs = chunkdata['cs']
65 deltabase = chunkdata['deltabase']
65 deltabase = chunkdata['deltabase']
66 delta = chunkdata['delta']
66 delta = chunkdata['delta']
67
67
68 size = len(delta)
68 size = len(delta)
69 start = bundle.tell() - size
69 start = bundle.tell() - size
70
70
71 link = linkmapper(cs)
71 link = linkmapper(cs)
72 if node in self.nodemap:
72 if node in self.nodemap:
73 # this can happen if two branches make the same change
73 # this can happen if two branches make the same change
74 chain = node
74 chain = node
75 self.bundlerevs.add(self.nodemap[node])
75 self.bundlerevs.add(self.nodemap[node])
76 continue
76 continue
77
77
78 for p in (p1, p2):
78 for p in (p1, p2):
79 if p not in self.nodemap:
79 if p not in self.nodemap:
80 raise error.LookupError(p, self.indexfile,
80 raise error.LookupError(p, self.indexfile,
81 _("unknown parent"))
81 _("unknown parent"))
82
82
83 if deltabase not in self.nodemap:
83 if deltabase not in self.nodemap:
84 raise LookupError(deltabase, self.indexfile,
84 raise LookupError(deltabase, self.indexfile,
85 _('unknown delta base'))
85 _('unknown delta base'))
86
86
87 baserev = self.rev(deltabase)
87 baserev = self.rev(deltabase)
88 # start, size, full unc. size, base (unused), link, p1, p2, node
88 # start, size, full unc. size, base (unused), link, p1, p2, node
89 e = (revlog.offset_type(start, 0), size, -1, baserev, link,
89 e = (revlog.offset_type(start, 0), size, -1, baserev, link,
90 self.rev(p1), self.rev(p2), node)
90 self.rev(p1), self.rev(p2), node)
91 self.index.insert(-1, e)
91 self.index.insert(-1, e)
92 self.nodemap[node] = n
92 self.nodemap[node] = n
93 self.bundlerevs.add(n)
93 self.bundlerevs.add(n)
94 chain = node
94 chain = node
95 n += 1
95 n += 1
96
96
97 def _chunk(self, rev):
97 def _chunk(self, rev):
98 # Warning: in case of bundle, the diff is against what we stored as
98 # Warning: in case of bundle, the diff is against what we stored as
99 # delta base, not against rev - 1
99 # delta base, not against rev - 1
100 # XXX: could use some caching
100 # XXX: could use some caching
101 if rev <= self.repotiprev:
101 if rev <= self.repotiprev:
102 return revlog.revlog._chunk(self, rev)
102 return revlog.revlog._chunk(self, rev)
103 self.bundle.seek(self.start(rev))
103 self.bundle.seek(self.start(rev))
104 return self.bundle.read(self.length(rev))
104 return self.bundle.read(self.length(rev))
105
105
106 def revdiff(self, rev1, rev2):
106 def revdiff(self, rev1, rev2):
107 """return or calculate a delta between two revisions"""
107 """return or calculate a delta between two revisions"""
108 if rev1 > self.repotiprev and rev2 > self.repotiprev:
108 if rev1 > self.repotiprev and rev2 > self.repotiprev:
109 # hot path for bundle
109 # hot path for bundle
110 revb = self.index[rev2][3]
110 revb = self.index[rev2][3]
111 if revb == rev1:
111 if revb == rev1:
112 return self._chunk(rev2)
112 return self._chunk(rev2)
113 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
113 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
114 return revlog.revlog.revdiff(self, rev1, rev2)
114 return revlog.revlog.revdiff(self, rev1, rev2)
115
115
116 return mdiff.textdiff(self.revision(self.node(rev1)),
116 return mdiff.textdiff(self.revision(self.node(rev1)),
117 self.revision(self.node(rev2)))
117 self.revision(self.node(rev2)))
118
118
119 def revision(self, nodeorrev):
119 def revision(self, nodeorrev):
120 """return an uncompressed revision of a given node or revision
120 """return an uncompressed revision of a given node or revision
121 number.
121 number.
122 """
122 """
123 if isinstance(nodeorrev, int):
123 if isinstance(nodeorrev, int):
124 rev = nodeorrev
124 rev = nodeorrev
125 node = self.node(rev)
125 node = self.node(rev)
126 else:
126 else:
127 node = nodeorrev
127 node = nodeorrev
128 rev = self.rev(node)
128 rev = self.rev(node)
129
129
130 if node == nullid:
130 if node == nullid:
131 return ""
131 return ""
132
132
133 text = None
133 text = None
134 chain = []
134 chain = []
135 iterrev = rev
135 iterrev = rev
136 # reconstruct the revision if it is from a changegroup
136 # reconstruct the revision if it is from a changegroup
137 while iterrev > self.repotiprev:
137 while iterrev > self.repotiprev:
138 if self._cache and self._cache[1] == iterrev:
138 if self._cache and self._cache[1] == iterrev:
139 text = self._cache[2]
139 text = self._cache[2]
140 break
140 break
141 chain.append(iterrev)
141 chain.append(iterrev)
142 iterrev = self.index[iterrev][3]
142 iterrev = self.index[iterrev][3]
143 if text is None:
143 if text is None:
144 text = self.baserevision(iterrev)
144 text = self.baserevision(iterrev)
145
145
146 while chain:
146 while chain:
147 delta = self._chunk(chain.pop())
147 delta = self._chunk(chain.pop())
148 text = mdiff.patches(text, [delta])
148 text = mdiff.patches(text, [delta])
149
149
150 self._checkhash(text, node, rev)
150 self._checkhash(text, node, rev)
151 self._cache = (node, rev, text)
151 self._cache = (node, rev, text)
152 return text
152 return text
153
153
154 def baserevision(self, nodeorrev):
154 def baserevision(self, nodeorrev):
155 # Revlog subclasses may override 'revision' method to modify format of
155 # Revlog subclasses may override 'revision' method to modify format of
156 # content retrieved from revlog. To use bundlerevlog with such class one
156 # content retrieved from revlog. To use bundlerevlog with such class one
157 # needs to override 'baserevision' and make more specific call here.
157 # needs to override 'baserevision' and make more specific call here.
158 return revlog.revlog.revision(self, nodeorrev)
158 return revlog.revlog.revision(self, nodeorrev)
159
159
160 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
160 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
161 raise NotImplementedError
161 raise NotImplementedError
162 def addgroup(self, revs, linkmapper, transaction):
162 def addgroup(self, revs, linkmapper, transaction):
163 raise NotImplementedError
163 raise NotImplementedError
164 def strip(self, rev, minlink):
164 def strip(self, rev, minlink):
165 raise NotImplementedError
165 raise NotImplementedError
166 def checksize(self):
166 def checksize(self):
167 raise NotImplementedError
167 raise NotImplementedError
168
168
169 class bundlechangelog(bundlerevlog, changelog.changelog):
169 class bundlechangelog(bundlerevlog, changelog.changelog):
170 def __init__(self, opener, bundle):
170 def __init__(self, opener, bundle):
171 changelog.changelog.__init__(self, opener)
171 changelog.changelog.__init__(self, opener)
172 linkmapper = lambda x: x
172 linkmapper = lambda x: x
173 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
173 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
174 linkmapper)
174 linkmapper)
175
175
176 def baserevision(self, nodeorrev):
176 def baserevision(self, nodeorrev):
177 # Although changelog doesn't override 'revision' method, some extensions
177 # Although changelog doesn't override 'revision' method, some extensions
178 # may replace this class with another that does. Same story with
178 # may replace this class with another that does. Same story with
179 # manifest and filelog classes.
179 # manifest and filelog classes.
180
180
181 # This bypasses filtering on changelog.node() and rev() because we need
181 # This bypasses filtering on changelog.node() and rev() because we need
182 # revision text of the bundle base even if it is hidden.
182 # revision text of the bundle base even if it is hidden.
183 oldfilter = self.filteredrevs
183 oldfilter = self.filteredrevs
184 try:
184 try:
185 self.filteredrevs = ()
185 self.filteredrevs = ()
186 return changelog.changelog.revision(self, nodeorrev)
186 return changelog.changelog.revision(self, nodeorrev)
187 finally:
187 finally:
188 self.filteredrevs = oldfilter
188 self.filteredrevs = oldfilter
189
189
190 class bundlemanifest(bundlerevlog, manifest.manifest):
190 class bundlemanifest(bundlerevlog, manifest.manifest):
191 def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
191 def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
192 manifest.manifest.__init__(self, opener, dir=dir)
192 manifest.manifest.__init__(self, opener, dir=dir)
193 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
193 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
194 linkmapper)
194 linkmapper)
195 if dirlogstarts is None:
195 if dirlogstarts is None:
196 dirlogstarts = {}
196 dirlogstarts = {}
197 if self.bundle.version == "03":
197 if self.bundle.version == "03":
198 dirlogstarts = _getfilestarts(self.bundle)
198 dirlogstarts = _getfilestarts(self.bundle)
199 self._dirlogstarts = dirlogstarts
199 self._dirlogstarts = dirlogstarts
200 self._linkmapper = linkmapper
200 self._linkmapper = linkmapper
201
201
202 def baserevision(self, nodeorrev):
202 def baserevision(self, nodeorrev):
203 node = nodeorrev
203 node = nodeorrev
204 if isinstance(node, int):
204 if isinstance(node, int):
205 node = self.node(node)
205 node = self.node(node)
206
206
207 if node in self.fulltextcache:
207 if node in self.fulltextcache:
208 result = self.fulltextcache[node].tostring()
208 result = self.fulltextcache[node].tostring()
209 else:
209 else:
210 result = manifest.manifest.revision(self, nodeorrev)
210 result = manifest.manifest.revision(self, nodeorrev)
211 return result
211 return result
212
212
213 def dirlog(self, d):
213 def dirlog(self, d):
214 if d in self._dirlogstarts:
214 if d in self._dirlogstarts:
215 self.bundle.seek(self._dirlogstarts[d])
215 self.bundle.seek(self._dirlogstarts[d])
216 return bundlemanifest(
216 return bundlemanifest(
217 self.opener, self.bundle, self._linkmapper,
217 self.opener, self.bundle, self._linkmapper,
218 self._dirlogstarts, dir=d)
218 self._dirlogstarts, dir=d)
219 return super(bundlemanifest, self).dirlog(d)
219 return super(bundlemanifest, self).dirlog(d)
220
220
221 class bundlefilelog(bundlerevlog, filelog.filelog):
221 class bundlefilelog(bundlerevlog, filelog.filelog):
222 def __init__(self, opener, path, bundle, linkmapper):
222 def __init__(self, opener, path, bundle, linkmapper):
223 filelog.filelog.__init__(self, opener, path)
223 filelog.filelog.__init__(self, opener, path)
224 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
224 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
225 linkmapper)
225 linkmapper)
226
226
227 def baserevision(self, nodeorrev):
227 def baserevision(self, nodeorrev):
228 return filelog.filelog.revision(self, nodeorrev)
228 return filelog.filelog.revision(self, nodeorrev)
229
229
230 class bundlepeer(localrepo.localpeer):
230 class bundlepeer(localrepo.localpeer):
231 def canpush(self):
231 def canpush(self):
232 return False
232 return False
233
233
234 class bundlephasecache(phases.phasecache):
234 class bundlephasecache(phases.phasecache):
235 def __init__(self, *args, **kwargs):
235 def __init__(self, *args, **kwargs):
236 super(bundlephasecache, self).__init__(*args, **kwargs)
236 super(bundlephasecache, self).__init__(*args, **kwargs)
237 if util.safehasattr(self, 'opener'):
237 if util.safehasattr(self, 'opener'):
238 self.opener = scmutil.readonlyvfs(self.opener)
238 self.opener = scmutil.readonlyvfs(self.opener)
239
239
240 def write(self):
240 def write(self):
241 raise NotImplementedError
241 raise NotImplementedError
242
242
243 def _write(self, fp):
243 def _write(self, fp):
244 raise NotImplementedError
244 raise NotImplementedError
245
245
246 def _updateroots(self, phase, newroots, tr):
246 def _updateroots(self, phase, newroots, tr):
247 self.phaseroots[phase] = newroots
247 self.phaseroots[phase] = newroots
248 self.invalidate()
248 self.invalidate()
249 self.dirty = True
249 self.dirty = True
250
250
251 def _getfilestarts(bundle):
251 def _getfilestarts(bundle):
252 bundlefilespos = {}
252 bundlefilespos = {}
253 for chunkdata in iter(bundle.filelogheader, {}):
253 for chunkdata in iter(bundle.filelogheader, {}):
254 fname = chunkdata['filename']
254 fname = chunkdata['filename']
255 bundlefilespos[fname] = bundle.tell()
255 bundlefilespos[fname] = bundle.tell()
256 for chunk in iter(lambda: bundle.deltachunk(None), {}):
256 for chunk in iter(lambda: bundle.deltachunk(None), {}):
257 pass
257 pass
258 return bundlefilespos
258 return bundlefilespos
259
259
260 class bundlerepository(localrepo.localrepository):
260 class bundlerepository(localrepo.localrepository):
261 def __init__(self, ui, path, bundlename):
261 def __init__(self, ui, path, bundlename):
262 def _writetempbundle(read, suffix, header=''):
262 def _writetempbundle(read, suffix, header=''):
263 """Write a temporary file to disk
263 """Write a temporary file to disk
264
264
265 This is closure because we need to make sure this tracked by
265 This is closure because we need to make sure this tracked by
266 self.tempfile for cleanup purposes."""
266 self.tempfile for cleanup purposes."""
267 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
267 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
268 suffix=".hg10un")
268 suffix=".hg10un")
269 self.tempfile = temp
269 self.tempfile = temp
270
270
271 with os.fdopen(fdtemp, 'wb') as fptemp:
271 with os.fdopen(fdtemp, 'wb') as fptemp:
272 fptemp.write(header)
272 fptemp.write(header)
273 while True:
273 while True:
274 chunk = read(2**18)
274 chunk = read(2**18)
275 if not chunk:
275 if not chunk:
276 break
276 break
277 fptemp.write(chunk)
277 fptemp.write(chunk)
278
278
279 return self.vfs.open(self.tempfile, mode="rb")
279 return self.vfs.open(self.tempfile, mode="rb")
280 self._tempparent = None
280 self._tempparent = None
281 try:
281 try:
282 localrepo.localrepository.__init__(self, ui, path)
282 localrepo.localrepository.__init__(self, ui, path)
283 except error.RepoError:
283 except error.RepoError:
284 self._tempparent = tempfile.mkdtemp()
284 self._tempparent = tempfile.mkdtemp()
285 localrepo.instance(ui, self._tempparent, 1)
285 localrepo.instance(ui, self._tempparent, 1)
286 localrepo.localrepository.__init__(self, ui, self._tempparent)
286 localrepo.localrepository.__init__(self, ui, self._tempparent)
287 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
287 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
288
288
289 if path:
289 if path:
290 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
290 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
291 else:
291 else:
292 self._url = 'bundle:' + bundlename
292 self._url = 'bundle:' + bundlename
293
293
294 self.tempfile = None
294 self.tempfile = None
295 f = util.posixfile(bundlename, "rb")
295 f = util.posixfile(bundlename, "rb")
296 self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
296 self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
297
297
298 if isinstance(self.bundle, bundle2.unbundle20):
298 if isinstance(self.bundle, bundle2.unbundle20):
299 cgstream = None
299 cgstream = None
300 for part in self.bundle.iterparts():
300 for part in self.bundle.iterparts():
301 if part.type == 'changegroup':
301 if part.type == 'changegroup':
302 if cgstream is not None:
302 if cgstream is not None:
303 raise NotImplementedError("can't process "
303 raise NotImplementedError("can't process "
304 "multiple changegroups")
304 "multiple changegroups")
305 cgstream = part
305 cgstream = part
306 version = part.params.get('version', '01')
306 version = part.params.get('version', '01')
307 legalcgvers = changegroup.supportedincomingversions(self)
307 legalcgvers = changegroup.supportedincomingversions(self)
308 if version not in legalcgvers:
308 if version not in legalcgvers:
309 msg = _('Unsupported changegroup version: %s')
309 msg = _('Unsupported changegroup version: %s')
310 raise error.Abort(msg % version)
310 raise error.Abort(msg % version)
311 if self.bundle.compressed():
311 if self.bundle.compressed():
312 cgstream = _writetempbundle(part.read,
312 cgstream = _writetempbundle(part.read,
313 ".cg%sun" % version)
313 ".cg%sun" % version)
314
314
315 if cgstream is None:
315 if cgstream is None:
316 raise error.Abort(_('No changegroups found'))
316 raise error.Abort(_('No changegroups found'))
317 cgstream.seek(0)
317 cgstream.seek(0)
318
318
319 self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
319 self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
320
320
321 elif self.bundle.compressed():
321 elif self.bundle.compressed():
322 f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
322 f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
323 self.bundlefile = self.bundle = exchange.readbundle(ui, f,
323 self.bundlefile = self.bundle = exchange.readbundle(ui, f,
324 bundlename,
324 bundlename,
325 self.vfs)
325 self.vfs)
326
326
327 # dict with the mapping 'filename' -> position in the bundle
327 # dict with the mapping 'filename' -> position in the bundle
328 self.bundlefilespos = {}
328 self.bundlefilespos = {}
329
329
330 self.firstnewrev = self.changelog.repotiprev + 1
330 self.firstnewrev = self.changelog.repotiprev + 1
331 phases.retractboundary(self, None, phases.draft,
331 phases.retractboundary(self, None, phases.draft,
332 [ctx.node() for ctx in self[self.firstnewrev:]])
332 [ctx.node() for ctx in self[self.firstnewrev:]])
333
333
334 @localrepo.unfilteredpropertycache
334 @localrepo.unfilteredpropertycache
335 def _phasecache(self):
335 def _phasecache(self):
336 return bundlephasecache(self, self._phasedefaults)
336 return bundlephasecache(self, self._phasedefaults)
337
337
338 @localrepo.unfilteredpropertycache
338 @localrepo.unfilteredpropertycache
339 def changelog(self):
339 def changelog(self):
340 # consume the header if it exists
340 # consume the header if it exists
341 self.bundle.changelogheader()
341 self.bundle.changelogheader()
342 c = bundlechangelog(self.svfs, self.bundle)
342 c = bundlechangelog(self.svfs, self.bundle)
343 self.manstart = self.bundle.tell()
343 self.manstart = self.bundle.tell()
344 return c
344 return c
345
345
346 @localrepo.unfilteredpropertycache
346 def _constructmanifest(self):
347 def manifest(self):
348 self.bundle.seek(self.manstart)
347 self.bundle.seek(self.manstart)
349 # consume the header if it exists
348 # consume the header if it exists
350 self.bundle.manifestheader()
349 self.bundle.manifestheader()
351 linkmapper = self.unfiltered().changelog.rev
350 linkmapper = self.unfiltered().changelog.rev
352 m = bundlemanifest(self.svfs, self.bundle, linkmapper)
351 m = bundlemanifest(self.svfs, self.bundle, linkmapper)
353 self.filestart = self.bundle.tell()
352 self.filestart = self.bundle.tell()
354 return m
353 return m
355
354
356 @localrepo.unfilteredpropertycache
355 @localrepo.unfilteredpropertycache
357 def manstart(self):
356 def manstart(self):
358 self.changelog
357 self.changelog
359 return self.manstart
358 return self.manstart
360
359
361 @localrepo.unfilteredpropertycache
360 @localrepo.unfilteredpropertycache
362 def filestart(self):
361 def filestart(self):
363 self.manifest
362 self.manifest
364 return self.filestart
363 return self.filestart
365
364
366 def url(self):
365 def url(self):
367 return self._url
366 return self._url
368
367
369 def file(self, f):
368 def file(self, f):
370 if not self.bundlefilespos:
369 if not self.bundlefilespos:
371 self.bundle.seek(self.filestart)
370 self.bundle.seek(self.filestart)
372 self.bundlefilespos = _getfilestarts(self.bundle)
371 self.bundlefilespos = _getfilestarts(self.bundle)
373
372
374 if f in self.bundlefilespos:
373 if f in self.bundlefilespos:
375 self.bundle.seek(self.bundlefilespos[f])
374 self.bundle.seek(self.bundlefilespos[f])
376 linkmapper = self.unfiltered().changelog.rev
375 linkmapper = self.unfiltered().changelog.rev
377 return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
376 return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
378 else:
377 else:
379 return filelog.filelog(self.svfs, f)
378 return filelog.filelog(self.svfs, f)
380
379
381 def close(self):
380 def close(self):
382 """Close assigned bundle file immediately."""
381 """Close assigned bundle file immediately."""
383 self.bundlefile.close()
382 self.bundlefile.close()
384 if self.tempfile is not None:
383 if self.tempfile is not None:
385 self.vfs.unlink(self.tempfile)
384 self.vfs.unlink(self.tempfile)
386 if self._tempparent:
385 if self._tempparent:
387 shutil.rmtree(self._tempparent, True)
386 shutil.rmtree(self._tempparent, True)
388
387
389 def cancopy(self):
388 def cancopy(self):
390 return False
389 return False
391
390
392 def peer(self):
391 def peer(self):
393 return bundlepeer(self)
392 return bundlepeer(self)
394
393
395 def getcwd(self):
394 def getcwd(self):
396 return os.getcwd() # always outside the repo
395 return os.getcwd() # always outside the repo
397
396
398 # Check if parents exist in localrepo before setting
397 # Check if parents exist in localrepo before setting
399 def setparents(self, p1, p2=nullid):
398 def setparents(self, p1, p2=nullid):
400 p1rev = self.changelog.rev(p1)
399 p1rev = self.changelog.rev(p1)
401 p2rev = self.changelog.rev(p2)
400 p2rev = self.changelog.rev(p2)
402 msg = _("setting parent to node %s that only exists in the bundle\n")
401 msg = _("setting parent to node %s that only exists in the bundle\n")
403 if self.changelog.repotiprev < p1rev:
402 if self.changelog.repotiprev < p1rev:
404 self.ui.warn(msg % nodemod.hex(p1))
403 self.ui.warn(msg % nodemod.hex(p1))
405 if self.changelog.repotiprev < p2rev:
404 if self.changelog.repotiprev < p2rev:
406 self.ui.warn(msg % nodemod.hex(p2))
405 self.ui.warn(msg % nodemod.hex(p2))
407 return super(bundlerepository, self).setparents(p1, p2)
406 return super(bundlerepository, self).setparents(p1, p2)
408
407
409 def instance(ui, path, create):
408 def instance(ui, path, create):
410 if create:
409 if create:
411 raise error.Abort(_('cannot create new bundle repository'))
410 raise error.Abort(_('cannot create new bundle repository'))
412 # internal config: bundle.mainreporoot
411 # internal config: bundle.mainreporoot
413 parentpath = ui.config("bundle", "mainreporoot", "")
412 parentpath = ui.config("bundle", "mainreporoot", "")
414 if not parentpath:
413 if not parentpath:
415 # try to find the correct path to the working directory repo
414 # try to find the correct path to the working directory repo
416 parentpath = cmdutil.findrepo(os.getcwd())
415 parentpath = cmdutil.findrepo(os.getcwd())
417 if parentpath is None:
416 if parentpath is None:
418 parentpath = ''
417 parentpath = ''
419 if parentpath:
418 if parentpath:
420 # Try to make the full path relative so we get a nice, short URL.
419 # Try to make the full path relative so we get a nice, short URL.
421 # In particular, we don't want temp dir names in test outputs.
420 # In particular, we don't want temp dir names in test outputs.
422 cwd = os.getcwd()
421 cwd = os.getcwd()
423 if parentpath == cwd:
422 if parentpath == cwd:
424 parentpath = ''
423 parentpath = ''
425 else:
424 else:
426 cwd = pathutil.normasprefix(cwd)
425 cwd = pathutil.normasprefix(cwd)
427 if parentpath.startswith(cwd):
426 if parentpath.startswith(cwd):
428 parentpath = parentpath[len(cwd):]
427 parentpath = parentpath[len(cwd):]
429 u = util.url(path)
428 u = util.url(path)
430 path = u.localpath()
429 path = u.localpath()
431 if u.scheme == 'bundle':
430 if u.scheme == 'bundle':
432 s = path.split("+", 1)
431 s = path.split("+", 1)
433 if len(s) == 1:
432 if len(s) == 1:
434 repopath, bundlename = parentpath, s[0]
433 repopath, bundlename = parentpath, s[0]
435 else:
434 else:
436 repopath, bundlename = s
435 repopath, bundlename = s
437 else:
436 else:
438 repopath, bundlename = parentpath, path
437 repopath, bundlename = parentpath, path
439 return bundlerepository(ui, repopath, bundlename)
438 return bundlerepository(ui, repopath, bundlename)
440
439
441 class bundletransactionmanager(object):
440 class bundletransactionmanager(object):
442 def transaction(self):
441 def transaction(self):
443 return None
442 return None
444
443
445 def close(self):
444 def close(self):
446 raise NotImplementedError
445 raise NotImplementedError
447
446
448 def release(self):
447 def release(self):
449 raise NotImplementedError
448 raise NotImplementedError
450
449
451 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
450 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
452 force=False):
451 force=False):
453 '''obtains a bundle of changes incoming from other
452 '''obtains a bundle of changes incoming from other
454
453
455 "onlyheads" restricts the returned changes to those reachable from the
454 "onlyheads" restricts the returned changes to those reachable from the
456 specified heads.
455 specified heads.
457 "bundlename", if given, stores the bundle to this file path permanently;
456 "bundlename", if given, stores the bundle to this file path permanently;
458 otherwise it's stored to a temp file and gets deleted again when you call
457 otherwise it's stored to a temp file and gets deleted again when you call
459 the returned "cleanupfn".
458 the returned "cleanupfn".
460 "force" indicates whether to proceed on unrelated repos.
459 "force" indicates whether to proceed on unrelated repos.
461
460
462 Returns a tuple (local, csets, cleanupfn):
461 Returns a tuple (local, csets, cleanupfn):
463
462
464 "local" is a local repo from which to obtain the actual incoming
463 "local" is a local repo from which to obtain the actual incoming
465 changesets; it is a bundlerepo for the obtained bundle when the
464 changesets; it is a bundlerepo for the obtained bundle when the
466 original "other" is remote.
465 original "other" is remote.
467 "csets" lists the incoming changeset node ids.
466 "csets" lists the incoming changeset node ids.
468 "cleanupfn" must be called without arguments when you're done processing
467 "cleanupfn" must be called without arguments when you're done processing
469 the changes; it closes both the original "other" and the one returned
468 the changes; it closes both the original "other" and the one returned
470 here.
469 here.
471 '''
470 '''
472 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
471 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
473 force=force)
472 force=force)
474 common, incoming, rheads = tmp
473 common, incoming, rheads = tmp
475 if not incoming:
474 if not incoming:
476 try:
475 try:
477 if bundlename:
476 if bundlename:
478 os.unlink(bundlename)
477 os.unlink(bundlename)
479 except OSError:
478 except OSError:
480 pass
479 pass
481 return repo, [], other.close
480 return repo, [], other.close
482
481
483 commonset = set(common)
482 commonset = set(common)
484 rheads = [x for x in rheads if x not in commonset]
483 rheads = [x for x in rheads if x not in commonset]
485
484
486 bundle = None
485 bundle = None
487 bundlerepo = None
486 bundlerepo = None
488 localrepo = other.local()
487 localrepo = other.local()
489 if bundlename or not localrepo:
488 if bundlename or not localrepo:
490 # create a bundle (uncompressed if other repo is not local)
489 # create a bundle (uncompressed if other repo is not local)
491
490
492 # developer config: devel.legacy.exchange
491 # developer config: devel.legacy.exchange
493 legexc = ui.configlist('devel', 'legacy.exchange')
492 legexc = ui.configlist('devel', 'legacy.exchange')
494 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
493 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
495 canbundle2 = (not forcebundle1
494 canbundle2 = (not forcebundle1
496 and other.capable('getbundle')
495 and other.capable('getbundle')
497 and other.capable('bundle2'))
496 and other.capable('bundle2'))
498 if canbundle2:
497 if canbundle2:
499 kwargs = {}
498 kwargs = {}
500 kwargs['common'] = common
499 kwargs['common'] = common
501 kwargs['heads'] = rheads
500 kwargs['heads'] = rheads
502 kwargs['bundlecaps'] = exchange.caps20to10(repo)
501 kwargs['bundlecaps'] = exchange.caps20to10(repo)
503 kwargs['cg'] = True
502 kwargs['cg'] = True
504 b2 = other.getbundle('incoming', **kwargs)
503 b2 = other.getbundle('incoming', **kwargs)
505 fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
504 fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
506 bundlename)
505 bundlename)
507 else:
506 else:
508 if other.capable('getbundle'):
507 if other.capable('getbundle'):
509 cg = other.getbundle('incoming', common=common, heads=rheads)
508 cg = other.getbundle('incoming', common=common, heads=rheads)
510 elif onlyheads is None and not other.capable('changegroupsubset'):
509 elif onlyheads is None and not other.capable('changegroupsubset'):
511 # compat with older servers when pulling all remote heads
510 # compat with older servers when pulling all remote heads
512 cg = other.changegroup(incoming, "incoming")
511 cg = other.changegroup(incoming, "incoming")
513 rheads = None
512 rheads = None
514 else:
513 else:
515 cg = other.changegroupsubset(incoming, rheads, 'incoming')
514 cg = other.changegroupsubset(incoming, rheads, 'incoming')
516 if localrepo:
515 if localrepo:
517 bundletype = "HG10BZ"
516 bundletype = "HG10BZ"
518 else:
517 else:
519 bundletype = "HG10UN"
518 bundletype = "HG10UN"
520 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
519 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
521 bundletype)
520 bundletype)
522 # keep written bundle?
521 # keep written bundle?
523 if bundlename:
522 if bundlename:
524 bundle = None
523 bundle = None
525 if not localrepo:
524 if not localrepo:
526 # use the created uncompressed bundlerepo
525 # use the created uncompressed bundlerepo
527 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
526 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
528 fname)
527 fname)
529 # this repo contains local and other now, so filter out local again
528 # this repo contains local and other now, so filter out local again
530 common = repo.heads()
529 common = repo.heads()
531 if localrepo:
530 if localrepo:
532 # Part of common may be remotely filtered
531 # Part of common may be remotely filtered
533 # So use an unfiltered version
532 # So use an unfiltered version
534 # The discovery process probably need cleanup to avoid that
533 # The discovery process probably need cleanup to avoid that
535 localrepo = localrepo.unfiltered()
534 localrepo = localrepo.unfiltered()
536
535
537 csets = localrepo.changelog.findmissing(common, rheads)
536 csets = localrepo.changelog.findmissing(common, rheads)
538
537
539 if bundlerepo:
538 if bundlerepo:
540 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
539 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
541 remotephases = other.listkeys('phases')
540 remotephases = other.listkeys('phases')
542
541
543 pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
542 pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
544 pullop.trmanager = bundletransactionmanager()
543 pullop.trmanager = bundletransactionmanager()
545 exchange._pullapplyphases(pullop, remotephases)
544 exchange._pullapplyphases(pullop, remotephases)
546
545
547 def cleanup():
546 def cleanup():
548 if bundlerepo:
547 if bundlerepo:
549 bundlerepo.close()
548 bundlerepo.close()
550 if bundle:
549 if bundle:
551 os.unlink(bundle)
550 os.unlink(bundle)
552 other.close()
551 other.close()
553
552
554 return (localrepo, csets, cleanup)
553 return (localrepo, csets, cleanup)
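The bundlerevlog code above stores each bundle revision as a delta at a known offset and rebuilds full texts on demand: revision() walks the delta-base links back until it reaches a revision that already lives in the on-disk revlog (rev <= repotiprev) or the one-entry cache, then applies the collected deltas forward. A minimal sketch of that reconstruction loop, with hypothetical callables standing in for the revlog index and mdiff machinery:

def reconstruct(rev, repotiprev, deltabase, getdelta, baserevision, applydelta):
    # deltabase(rev)    -> revision the delta for 'rev' applies to
    # getdelta(rev)     -> delta stored in the bundle for 'rev'
    # baserevision(rev) -> full text of a revision already in the revlog
    # applydelta(t, d)  -> text 't' with delta 'd' applied
    chain = []
    iterrev = rev
    while iterrev > repotiprev:          # still inside the bundle
        chain.append(iterrev)
        iterrev = deltabase(iterrev)
    text = baserevision(iterrev)         # full text from the local revlog
    while chain:                         # replay bundle deltas, oldest first
        text = applydelta(text, getdelta(chain.pop()))
    return text

# toy usage: revisions 0-2 are local, 3-4 come from the bundle as
# "append one character" deltas
bases = {3: 2, 4: 3}
deltas = {3: '+c', 4: '+d'}
print(reconstruct(4, repotiprev=2,
                  deltabase=bases.get,
                  getdelta=deltas.get,
                  baserevision=lambda r: 'ab',
                  applydelta=lambda t, d: t + d[1:]))   # -> 'abcd'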
@@ -1,2000 +1,2006 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 cmdutil,
31 cmdutil,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 encoding,
34 encoding,
35 error,
35 error,
36 exchange,
36 exchange,
37 extensions,
37 extensions,
38 filelog,
38 filelog,
39 hook,
39 hook,
40 lock as lockmod,
40 lock as lockmod,
41 manifest,
41 manifest,
42 match as matchmod,
42 match as matchmod,
43 merge as mergemod,
43 merge as mergemod,
44 namespaces,
44 namespaces,
45 obsolete,
45 obsolete,
46 pathutil,
46 pathutil,
47 peer,
47 peer,
48 phases,
48 phases,
49 pushkey,
49 pushkey,
50 repoview,
50 repoview,
51 revset,
51 revset,
52 scmutil,
52 scmutil,
53 store,
53 store,
54 subrepo,
54 subrepo,
55 tags as tagsmod,
55 tags as tagsmod,
56 transaction,
56 transaction,
57 util,
57 util,
58 )
58 )
59
59
60 release = lockmod.release
60 release = lockmod.release
61 urlerr = util.urlerr
61 urlerr = util.urlerr
62 urlreq = util.urlreq
62 urlreq = util.urlreq
63
63
64 class repofilecache(scmutil.filecache):
64 class repofilecache(scmutil.filecache):
65 """All filecache usage on repo are done for logic that should be unfiltered
65 """All filecache usage on repo are done for logic that should be unfiltered
66 """
66 """
67
67
68 def __get__(self, repo, type=None):
68 def __get__(self, repo, type=None):
69 if repo is None:
69 if repo is None:
70 return self
70 return self
71 return super(repofilecache, self).__get__(repo.unfiltered(), type)
71 return super(repofilecache, self).__get__(repo.unfiltered(), type)
72 def __set__(self, repo, value):
72 def __set__(self, repo, value):
73 return super(repofilecache, self).__set__(repo.unfiltered(), value)
73 return super(repofilecache, self).__set__(repo.unfiltered(), value)
74 def __delete__(self, repo):
74 def __delete__(self, repo):
75 return super(repofilecache, self).__delete__(repo.unfiltered())
75 return super(repofilecache, self).__delete__(repo.unfiltered())
76
76
77 class storecache(repofilecache):
77 class storecache(repofilecache):
78 """filecache for files in the store"""
78 """filecache for files in the store"""
79 def join(self, obj, fname):
79 def join(self, obj, fname):
80 return obj.sjoin(fname)
80 return obj.sjoin(fname)
81
81
82 class unfilteredpropertycache(util.propertycache):
82 class unfilteredpropertycache(util.propertycache):
83 """propertycache that apply to unfiltered repo only"""
83 """propertycache that apply to unfiltered repo only"""
84
84
85 def __get__(self, repo, type=None):
85 def __get__(self, repo, type=None):
86 unfi = repo.unfiltered()
86 unfi = repo.unfiltered()
87 if unfi is repo:
87 if unfi is repo:
88 return super(unfilteredpropertycache, self).__get__(unfi)
88 return super(unfilteredpropertycache, self).__get__(unfi)
89 return getattr(unfi, self.name)
89 return getattr(unfi, self.name)
90
90
91 class filteredpropertycache(util.propertycache):
91 class filteredpropertycache(util.propertycache):
92 """propertycache that must take filtering in account"""
92 """propertycache that must take filtering in account"""
93
93
94 def cachevalue(self, obj, value):
94 def cachevalue(self, obj, value):
95 object.__setattr__(obj, self.name, value)
95 object.__setattr__(obj, self.name, value)
96
96
97
97
98 def hasunfilteredcache(repo, name):
98 def hasunfilteredcache(repo, name):
99 """check if a repo has an unfilteredpropertycache value for <name>"""
99 """check if a repo has an unfilteredpropertycache value for <name>"""
100 return name in vars(repo.unfiltered())
100 return name in vars(repo.unfiltered())
101
101
102 def unfilteredmethod(orig):
102 def unfilteredmethod(orig):
103 """decorate method that always need to be run on unfiltered version"""
103 """decorate method that always need to be run on unfiltered version"""
104 def wrapper(repo, *args, **kwargs):
104 def wrapper(repo, *args, **kwargs):
105 return orig(repo.unfiltered(), *args, **kwargs)
105 return orig(repo.unfiltered(), *args, **kwargs)
106 return wrapper
106 return wrapper
107
107
108 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
108 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
109 'unbundle'))
109 'unbundle'))
110 legacycaps = moderncaps.union(set(['changegroupsubset']))
110 legacycaps = moderncaps.union(set(['changegroupsubset']))
111
111
112 class localpeer(peer.peerrepository):
112 class localpeer(peer.peerrepository):
113 '''peer for a local repo; reflects only the most recent API'''
113 '''peer for a local repo; reflects only the most recent API'''
114
114
115 def __init__(self, repo, caps=moderncaps):
115 def __init__(self, repo, caps=moderncaps):
116 peer.peerrepository.__init__(self)
116 peer.peerrepository.__init__(self)
117 self._repo = repo.filtered('served')
117 self._repo = repo.filtered('served')
118 self.ui = repo.ui
118 self.ui = repo.ui
119 self._caps = repo._restrictcapabilities(caps)
119 self._caps = repo._restrictcapabilities(caps)
120 self.requirements = repo.requirements
120 self.requirements = repo.requirements
121 self.supportedformats = repo.supportedformats
121 self.supportedformats = repo.supportedformats
122
122
123 def close(self):
123 def close(self):
124 self._repo.close()
124 self._repo.close()
125
125
126 def _capabilities(self):
126 def _capabilities(self):
127 return self._caps
127 return self._caps
128
128
129 def local(self):
129 def local(self):
130 return self._repo
130 return self._repo
131
131
132 def canpush(self):
132 def canpush(self):
133 return True
133 return True
134
134
135 def url(self):
135 def url(self):
136 return self._repo.url()
136 return self._repo.url()
137
137
138 def lookup(self, key):
138 def lookup(self, key):
139 return self._repo.lookup(key)
139 return self._repo.lookup(key)
140
140
141 def branchmap(self):
141 def branchmap(self):
142 return self._repo.branchmap()
142 return self._repo.branchmap()
143
143
144 def heads(self):
144 def heads(self):
145 return self._repo.heads()
145 return self._repo.heads()
146
146
147 def known(self, nodes):
147 def known(self, nodes):
148 return self._repo.known(nodes)
148 return self._repo.known(nodes)
149
149
150 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
150 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
151 **kwargs):
151 **kwargs):
152 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
152 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
153 common=common, bundlecaps=bundlecaps,
153 common=common, bundlecaps=bundlecaps,
154 **kwargs)
154 **kwargs)
155 cb = util.chunkbuffer(chunks)
155 cb = util.chunkbuffer(chunks)
156
156
157 if bundlecaps is not None and 'HG20' in bundlecaps:
157 if bundlecaps is not None and 'HG20' in bundlecaps:
158 # When requesting a bundle2, getbundle returns a stream to make the
158 # When requesting a bundle2, getbundle returns a stream to make the
159 # wire level function happier. We need to build a proper object
159 # wire level function happier. We need to build a proper object
160 # from it in local peer.
160 # from it in local peer.
161 return bundle2.getunbundler(self.ui, cb)
161 return bundle2.getunbundler(self.ui, cb)
162 else:
162 else:
163 return changegroup.getunbundler('01', cb, None)
163 return changegroup.getunbundler('01', cb, None)
164
164
165 # TODO We might want to move the next two calls into legacypeer and add
165 # TODO We might want to move the next two calls into legacypeer and add
166 # unbundle instead.
166 # unbundle instead.
167
167
168 def unbundle(self, cg, heads, url):
168 def unbundle(self, cg, heads, url):
169 """apply a bundle on a repo
169 """apply a bundle on a repo
170
170
171 This function handles the repo locking itself."""
171 This function handles the repo locking itself."""
172 try:
172 try:
173 try:
173 try:
174 cg = exchange.readbundle(self.ui, cg, None)
174 cg = exchange.readbundle(self.ui, cg, None)
175 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
175 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
176 if util.safehasattr(ret, 'getchunks'):
176 if util.safehasattr(ret, 'getchunks'):
177 # This is a bundle20 object, turn it into an unbundler.
177 # This is a bundle20 object, turn it into an unbundler.
178 # This little dance should be dropped eventually when the
178 # This little dance should be dropped eventually when the
179 # API is finally improved.
179 # API is finally improved.
180 stream = util.chunkbuffer(ret.getchunks())
180 stream = util.chunkbuffer(ret.getchunks())
181 ret = bundle2.getunbundler(self.ui, stream)
181 ret = bundle2.getunbundler(self.ui, stream)
182 return ret
182 return ret
183 except Exception as exc:
183 except Exception as exc:
184 # If the exception contains output salvaged from a bundle2
184 # If the exception contains output salvaged from a bundle2
185 # reply, we need to make sure it is printed before continuing
185 # reply, we need to make sure it is printed before continuing
186 # to fail. So we build a bundle2 with such output and consume
186 # to fail. So we build a bundle2 with such output and consume
187 # it directly.
187 # it directly.
188 #
188 #
189 # This is not very elegant but allows a "simple" solution for
189 # This is not very elegant but allows a "simple" solution for
190 # issue4594
190 # issue4594
191 output = getattr(exc, '_bundle2salvagedoutput', ())
191 output = getattr(exc, '_bundle2salvagedoutput', ())
192 if output:
192 if output:
193 bundler = bundle2.bundle20(self._repo.ui)
193 bundler = bundle2.bundle20(self._repo.ui)
194 for out in output:
194 for out in output:
195 bundler.addpart(out)
195 bundler.addpart(out)
196 stream = util.chunkbuffer(bundler.getchunks())
196 stream = util.chunkbuffer(bundler.getchunks())
197 b = bundle2.getunbundler(self.ui, stream)
197 b = bundle2.getunbundler(self.ui, stream)
198 bundle2.processbundle(self._repo, b)
198 bundle2.processbundle(self._repo, b)
199 raise
199 raise
200 except error.PushRaced as exc:
200 except error.PushRaced as exc:
201 raise error.ResponseError(_('push failed:'), str(exc))
201 raise error.ResponseError(_('push failed:'), str(exc))
202
202
203 def lock(self):
203 def lock(self):
204 return self._repo.lock()
204 return self._repo.lock()
205
205
206 def addchangegroup(self, cg, source, url):
206 def addchangegroup(self, cg, source, url):
207 return cg.apply(self._repo, source, url)
207 return cg.apply(self._repo, source, url)
208
208
209 def pushkey(self, namespace, key, old, new):
209 def pushkey(self, namespace, key, old, new):
210 return self._repo.pushkey(namespace, key, old, new)
210 return self._repo.pushkey(namespace, key, old, new)
211
211
212 def listkeys(self, namespace):
212 def listkeys(self, namespace):
213 return self._repo.listkeys(namespace)
213 return self._repo.listkeys(namespace)
214
214
215 def debugwireargs(self, one, two, three=None, four=None, five=None):
215 def debugwireargs(self, one, two, three=None, four=None, five=None):
216 '''used to test argument passing over the wire'''
216 '''used to test argument passing over the wire'''
217 return "%s %s %s %s %s" % (one, two, three, four, five)
217 return "%s %s %s %s %s" % (one, two, three, four, five)
218
218
219 class locallegacypeer(localpeer):
219 class locallegacypeer(localpeer):
220 '''peer extension which implements legacy methods too; used for tests with
220 '''peer extension which implements legacy methods too; used for tests with
221 restricted capabilities'''
221 restricted capabilities'''
222
222
223 def __init__(self, repo):
223 def __init__(self, repo):
224 localpeer.__init__(self, repo, caps=legacycaps)
224 localpeer.__init__(self, repo, caps=legacycaps)
225
225
226 def branches(self, nodes):
226 def branches(self, nodes):
227 return self._repo.branches(nodes)
227 return self._repo.branches(nodes)
228
228
229 def between(self, pairs):
229 def between(self, pairs):
230 return self._repo.between(pairs)
230 return self._repo.between(pairs)
231
231
232 def changegroup(self, basenodes, source):
232 def changegroup(self, basenodes, source):
233 return changegroup.changegroup(self._repo, basenodes, source)
233 return changegroup.changegroup(self._repo, basenodes, source)
234
234
235 def changegroupsubset(self, bases, heads, source):
235 def changegroupsubset(self, bases, heads, source):
236 return changegroup.changegroupsubset(self._repo, bases, heads, source)
236 return changegroup.changegroupsubset(self._repo, bases, heads, source)
237
237
238 class localrepository(object):
238 class localrepository(object):
239
239
240 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
240 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
241 'manifestv2'))
241 'manifestv2'))
242 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
242 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
243 'dotencode'))
243 'dotencode'))
244 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
244 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
245 filtername = None
245 filtername = None
246
246
247 # a list of (ui, featureset) functions.
247 # a list of (ui, featureset) functions.
248 # only functions defined in module of enabled extensions are invoked
248 # only functions defined in module of enabled extensions are invoked
249 featuresetupfuncs = set()
249 featuresetupfuncs = set()
250
250
251 def __init__(self, baseui, path=None, create=False):
251 def __init__(self, baseui, path=None, create=False):
252 self.requirements = set()
252 self.requirements = set()
253 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
253 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
254 self.wopener = self.wvfs
254 self.wopener = self.wvfs
255 self.root = self.wvfs.base
255 self.root = self.wvfs.base
256 self.path = self.wvfs.join(".hg")
256 self.path = self.wvfs.join(".hg")
257 self.origroot = path
257 self.origroot = path
258 self.auditor = pathutil.pathauditor(self.root, self._checknested)
258 self.auditor = pathutil.pathauditor(self.root, self._checknested)
259 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
259 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
260 realfs=False)
260 realfs=False)
261 self.vfs = scmutil.vfs(self.path)
261 self.vfs = scmutil.vfs(self.path)
262 self.opener = self.vfs
262 self.opener = self.vfs
263 self.baseui = baseui
263 self.baseui = baseui
264 self.ui = baseui.copy()
264 self.ui = baseui.copy()
265 self.ui.copy = baseui.copy # prevent copying repo configuration
265 self.ui.copy = baseui.copy # prevent copying repo configuration
266 # A list of callback to shape the phase if no data were found.
266 # A list of callback to shape the phase if no data were found.
267 # Callback are in the form: func(repo, roots) --> processed root.
267 # Callback are in the form: func(repo, roots) --> processed root.
268 # This list it to be filled by extension during repo setup
268 # This list it to be filled by extension during repo setup
269 self._phasedefaults = []
269 self._phasedefaults = []
270 try:
270 try:
271 self.ui.readconfig(self.join("hgrc"), self.root)
271 self.ui.readconfig(self.join("hgrc"), self.root)
272 extensions.loadall(self.ui)
272 extensions.loadall(self.ui)
273 except IOError:
273 except IOError:
274 pass
274 pass
275
275
276 if self.featuresetupfuncs:
276 if self.featuresetupfuncs:
277 self.supported = set(self._basesupported) # use private copy
277 self.supported = set(self._basesupported) # use private copy
278 extmods = set(m.__name__ for n, m
278 extmods = set(m.__name__ for n, m
279 in extensions.extensions(self.ui))
279 in extensions.extensions(self.ui))
280 for setupfunc in self.featuresetupfuncs:
280 for setupfunc in self.featuresetupfuncs:
281 if setupfunc.__module__ in extmods:
281 if setupfunc.__module__ in extmods:
282 setupfunc(self.ui, self.supported)
282 setupfunc(self.ui, self.supported)
283 else:
283 else:
284 self.supported = self._basesupported
284 self.supported = self._basesupported
285
285
286 if not self.vfs.isdir():
286 if not self.vfs.isdir():
287 if create:
287 if create:
288 self.requirements = newreporequirements(self)
288 self.requirements = newreporequirements(self)
289
289
290 if not self.wvfs.exists():
290 if not self.wvfs.exists():
291 self.wvfs.makedirs()
291 self.wvfs.makedirs()
292 self.vfs.makedir(notindexed=True)
292 self.vfs.makedir(notindexed=True)
293
293
294 if 'store' in self.requirements:
294 if 'store' in self.requirements:
295 self.vfs.mkdir("store")
295 self.vfs.mkdir("store")
296
296
297 # create an invalid changelog
297 # create an invalid changelog
298 self.vfs.append(
298 self.vfs.append(
299 "00changelog.i",
299 "00changelog.i",
300 '\0\0\0\2' # represents revlogv2
300 '\0\0\0\2' # represents revlogv2
301 ' dummy changelog to prevent using the old repo layout'
301 ' dummy changelog to prevent using the old repo layout'
302 )
302 )
303 else:
303 else:
304 raise error.RepoError(_("repository %s not found") % path)
304 raise error.RepoError(_("repository %s not found") % path)
305 elif create:
305 elif create:
306 raise error.RepoError(_("repository %s already exists") % path)
306 raise error.RepoError(_("repository %s already exists") % path)
307 else:
307 else:
308 try:
308 try:
309 self.requirements = scmutil.readrequires(
309 self.requirements = scmutil.readrequires(
310 self.vfs, self.supported)
310 self.vfs, self.supported)
311 except IOError as inst:
311 except IOError as inst:
312 if inst.errno != errno.ENOENT:
312 if inst.errno != errno.ENOENT:
313 raise
313 raise
314
314
315 self.sharedpath = self.path
315 self.sharedpath = self.path
316 try:
316 try:
317 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
317 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
318 realpath=True)
318 realpath=True)
319 s = vfs.base
319 s = vfs.base
320 if not vfs.exists():
320 if not vfs.exists():
321 raise error.RepoError(
321 raise error.RepoError(
322 _('.hg/sharedpath points to nonexistent directory %s') % s)
322 _('.hg/sharedpath points to nonexistent directory %s') % s)
323 self.sharedpath = s
323 self.sharedpath = s
324 except IOError as inst:
324 except IOError as inst:
325 if inst.errno != errno.ENOENT:
325 if inst.errno != errno.ENOENT:
326 raise
326 raise
327
327
328 self.store = store.store(
328 self.store = store.store(
329 self.requirements, self.sharedpath, scmutil.vfs)
329 self.requirements, self.sharedpath, scmutil.vfs)
330 self.spath = self.store.path
330 self.spath = self.store.path
331 self.svfs = self.store.vfs
331 self.svfs = self.store.vfs
332 self.sjoin = self.store.join
332 self.sjoin = self.store.join
333 self.vfs.createmode = self.store.createmode
333 self.vfs.createmode = self.store.createmode
334 self._applyopenerreqs()
334 self._applyopenerreqs()
335 if create:
335 if create:
336 self._writerequirements()
336 self._writerequirements()
337
337
338 self._dirstatevalidatewarned = False
338 self._dirstatevalidatewarned = False
339
339
340 self._branchcaches = {}
340 self._branchcaches = {}
341 self._revbranchcache = None
341 self._revbranchcache = None
342 self.filterpats = {}
342 self.filterpats = {}
343 self._datafilters = {}
343 self._datafilters = {}
344 self._transref = self._lockref = self._wlockref = None
344 self._transref = self._lockref = self._wlockref = None
345
345
346 # A cache for various files under .hg/ that tracks file changes,
346 # A cache for various files under .hg/ that tracks file changes,
347 # (used by the filecache decorator)
347 # (used by the filecache decorator)
348 #
348 #
349 # Maps a property name to its util.filecacheentry
349 # Maps a property name to its util.filecacheentry
350 self._filecache = {}
350 self._filecache = {}
351
351
352 # hold sets of revision to be filtered
352 # hold sets of revision to be filtered
353 # should be cleared when something might have changed the filter value:
353 # should be cleared when something might have changed the filter value:
354 # - new changesets,
354 # - new changesets,
355 # - phase change,
355 # - phase change,
356 # - new obsolescence marker,
356 # - new obsolescence marker,
357 # - working directory parent change,
357 # - working directory parent change,
358 # - bookmark changes
358 # - bookmark changes
359 self.filteredrevcache = {}
359 self.filteredrevcache = {}
360
360
361 # generic mapping between names and nodes
361 # generic mapping between names and nodes
362 self.names = namespaces.namespaces()
362 self.names = namespaces.namespaces()
363
363
364 def close(self):
364 def close(self):
365 self._writecaches()
365 self._writecaches()
366
366
367 def _writecaches(self):
367 def _writecaches(self):
368 if self._revbranchcache:
368 if self._revbranchcache:
369 self._revbranchcache.write()
369 self._revbranchcache.write()
370
370
371 def _restrictcapabilities(self, caps):
371 def _restrictcapabilities(self, caps):
372 if self.ui.configbool('experimental', 'bundle2-advertise', True):
372 if self.ui.configbool('experimental', 'bundle2-advertise', True):
373 caps = set(caps)
373 caps = set(caps)
374 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
374 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
375 caps.add('bundle2=' + urlreq.quote(capsblob))
375 caps.add('bundle2=' + urlreq.quote(capsblob))
376 return caps
376 return caps
377
377
378 def _applyopenerreqs(self):
378 def _applyopenerreqs(self):
379 self.svfs.options = dict((r, 1) for r in self.requirements
379 self.svfs.options = dict((r, 1) for r in self.requirements
380 if r in self.openerreqs)
380 if r in self.openerreqs)
381 # experimental config: format.chunkcachesize
381 # experimental config: format.chunkcachesize
382 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
382 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
383 if chunkcachesize is not None:
383 if chunkcachesize is not None:
384 self.svfs.options['chunkcachesize'] = chunkcachesize
384 self.svfs.options['chunkcachesize'] = chunkcachesize
385 # experimental config: format.maxchainlen
385 # experimental config: format.maxchainlen
386 maxchainlen = self.ui.configint('format', 'maxchainlen')
386 maxchainlen = self.ui.configint('format', 'maxchainlen')
387 if maxchainlen is not None:
387 if maxchainlen is not None:
388 self.svfs.options['maxchainlen'] = maxchainlen
388 self.svfs.options['maxchainlen'] = maxchainlen
389 # experimental config: format.manifestcachesize
389 # experimental config: format.manifestcachesize
390 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
390 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
391 if manifestcachesize is not None:
391 if manifestcachesize is not None:
392 self.svfs.options['manifestcachesize'] = manifestcachesize
392 self.svfs.options['manifestcachesize'] = manifestcachesize
393 # experimental config: format.aggressivemergedeltas
393 # experimental config: format.aggressivemergedeltas
394 aggressivemergedeltas = self.ui.configbool('format',
394 aggressivemergedeltas = self.ui.configbool('format',
395 'aggressivemergedeltas', False)
395 'aggressivemergedeltas', False)
396 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
396 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
397 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
397 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
398
398
399 def _writerequirements(self):
399 def _writerequirements(self):
400 scmutil.writerequires(self.vfs, self.requirements)
400 scmutil.writerequires(self.vfs, self.requirements)
401
401
402 def _checknested(self, path):
402 def _checknested(self, path):
403 """Determine if path is a legal nested repository."""
403 """Determine if path is a legal nested repository."""
404 if not path.startswith(self.root):
404 if not path.startswith(self.root):
405 return False
405 return False
406 subpath = path[len(self.root) + 1:]
406 subpath = path[len(self.root) + 1:]
407 normsubpath = util.pconvert(subpath)
407 normsubpath = util.pconvert(subpath)
408
408
409 # XXX: Checking against the current working copy is wrong in
409 # XXX: Checking against the current working copy is wrong in
410 # the sense that it can reject things like
410 # the sense that it can reject things like
411 #
411 #
412 # $ hg cat -r 10 sub/x.txt
412 # $ hg cat -r 10 sub/x.txt
413 #
413 #
414 # if sub/ is no longer a subrepository in the working copy
414 # if sub/ is no longer a subrepository in the working copy
415 # parent revision.
415 # parent revision.
416 #
416 #
417 # However, it can of course also allow things that would have
417 # However, it can of course also allow things that would have
418 # been rejected before, such as the above cat command if sub/
418 # been rejected before, such as the above cat command if sub/
419 # is a subrepository now, but was a normal directory before.
419 # is a subrepository now, but was a normal directory before.
420 # The old path auditor would have rejected by mistake since it
420 # The old path auditor would have rejected by mistake since it
421 # panics when it sees sub/.hg/.
421 # panics when it sees sub/.hg/.
422 #
422 #
423 # All in all, checking against the working copy seems sensible
423 # All in all, checking against the working copy seems sensible
424 # since we want to prevent access to nested repositories on
424 # since we want to prevent access to nested repositories on
425 # the filesystem *now*.
425 # the filesystem *now*.
426 ctx = self[None]
426 ctx = self[None]
427 parts = util.splitpath(subpath)
427 parts = util.splitpath(subpath)
428 while parts:
428 while parts:
429 prefix = '/'.join(parts)
429 prefix = '/'.join(parts)
430 if prefix in ctx.substate:
430 if prefix in ctx.substate:
431 if prefix == normsubpath:
431 if prefix == normsubpath:
432 return True
432 return True
433 else:
433 else:
434 sub = ctx.sub(prefix)
434 sub = ctx.sub(prefix)
435 return sub.checknested(subpath[len(prefix) + 1:])
435 return sub.checknested(subpath[len(prefix) + 1:])
436 else:
436 else:
437 parts.pop()
437 parts.pop()
438 return False
438 return False
439
439
440 def peer(self):
440 def peer(self):
441 return localpeer(self) # not cached to avoid reference cycle
441 return localpeer(self) # not cached to avoid reference cycle
442
442
443 def unfiltered(self):
443 def unfiltered(self):
444 """Return unfiltered version of the repository
444 """Return unfiltered version of the repository
445
445
446 Intended to be overridden by a filtered repo."""
446 Intended to be overridden by a filtered repo."""
447 return self
447 return self
448
448
449 def filtered(self, name):
449 def filtered(self, name):
450 """Return a filtered version of a repository"""
450 """Return a filtered version of a repository"""
451 # build a new class with the mixin and the current class
451 # build a new class with the mixin and the current class
452 # (possibly subclass of the repo)
452 # (possibly subclass of the repo)
453 class proxycls(repoview.repoview, self.unfiltered().__class__):
453 class proxycls(repoview.repoview, self.unfiltered().__class__):
454 pass
454 pass
455 return proxycls(self, name)
455 return proxycls(self, name)
456
456
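Usage sketch (not part of the file, assuming `repo` is an existing localrepository instance): filtered() returns a repoview that hides certain revisions; 'visible' and 'served' are standard repoview filter names.

    visible = repo.filtered('visible')   # hides hidden (e.g. obsoleted) changesets
    served = repo.filtered('served')     # additionally hides secret changesets
    heads = served.revs('head() and public()')   # queries respect the filter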
457 @repofilecache('bookmarks', 'bookmarks.current')
457 @repofilecache('bookmarks', 'bookmarks.current')
458 def _bookmarks(self):
458 def _bookmarks(self):
459 return bookmarks.bmstore(self)
459 return bookmarks.bmstore(self)
460
460
461 @property
461 @property
462 def _activebookmark(self):
462 def _activebookmark(self):
463 return self._bookmarks.active
463 return self._bookmarks.active
464
464
465 def bookmarkheads(self, bookmark):
465 def bookmarkheads(self, bookmark):
466 name = bookmark.split('@', 1)[0]
466 name = bookmark.split('@', 1)[0]
467 heads = []
467 heads = []
468 for mark, n in self._bookmarks.iteritems():
468 for mark, n in self._bookmarks.iteritems():
469 if mark.split('@', 1)[0] == name:
469 if mark.split('@', 1)[0] == name:
470 heads.append(n)
470 heads.append(n)
471 return heads
471 return heads
472
472
473 # _phaserevs and _phasesets depend on changelog. what we need is to
473 # _phaserevs and _phasesets depend on changelog. what we need is to
474 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
474 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
475 # can't be easily expressed in the filecache mechanism.
475 # can't be easily expressed in the filecache mechanism.
476 @storecache('phaseroots', '00changelog.i')
476 @storecache('phaseroots', '00changelog.i')
477 def _phasecache(self):
477 def _phasecache(self):
478 return phases.phasecache(self, self._phasedefaults)
478 return phases.phasecache(self, self._phasedefaults)
479
479
480 @storecache('obsstore')
480 @storecache('obsstore')
481 def obsstore(self):
481 def obsstore(self):
482 # read default format for new obsstore.
482 # read default format for new obsstore.
483 # developer config: format.obsstore-version
483 # developer config: format.obsstore-version
484 defaultformat = self.ui.configint('format', 'obsstore-version', None)
484 defaultformat = self.ui.configint('format', 'obsstore-version', None)
485 # rely on obsstore class default when possible.
485 # rely on obsstore class default when possible.
486 kwargs = {}
486 kwargs = {}
487 if defaultformat is not None:
487 if defaultformat is not None:
488 kwargs['defaultformat'] = defaultformat
488 kwargs['defaultformat'] = defaultformat
489 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
489 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
490 store = obsolete.obsstore(self.svfs, readonly=readonly,
490 store = obsolete.obsstore(self.svfs, readonly=readonly,
491 **kwargs)
491 **kwargs)
492 if store and readonly:
492 if store and readonly:
493 self.ui.warn(
493 self.ui.warn(
494 _('obsolete feature not enabled but %i markers found!\n')
494 _('obsolete feature not enabled but %i markers found!\n')
495 % len(list(store)))
495 % len(list(store)))
496 return store
496 return store
497
497
498 @storecache('00changelog.i')
498 @storecache('00changelog.i')
499 def changelog(self):
499 def changelog(self):
500 c = changelog.changelog(self.svfs)
500 c = changelog.changelog(self.svfs)
501 if 'HG_PENDING' in os.environ:
501 if 'HG_PENDING' in os.environ:
502 p = os.environ['HG_PENDING']
502 p = os.environ['HG_PENDING']
503 if p.startswith(self.root):
503 if p.startswith(self.root):
504 c.readpending('00changelog.i.a')
504 c.readpending('00changelog.i.a')
505 return c
505 return c
506
506
507 @storecache('00manifest.i')
507 @storecache('00manifest.i')
508 def manifest(self):
508 def manifest(self):
509 return self._constructmanifest()
510
511 def _constructmanifest(self):
512 # This is a temporary function while we migrate from manifest to
513 # manifestlog. It allows bundlerepo and unionrepo to intercept the
514 # manifest creation.
509 return manifest.manifest(self.svfs)
515 return manifest.manifest(self.svfs)
510
516
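A note on the change above (illustration only): pulling manifest construction into _constructmanifest() lets a derived repository class swap in its own manifest object by overriding one small helper instead of the cached manifest property. A hypothetical override, not the actual bundlerepo code, could look like:

    class myrepository(localrepository):
        def _constructmanifest(self):
            # hand back any manifest-compatible object here; the real
            # bundlerepo/unionrepo overrides use this hook to serve
            # manifests read from a bundle instead of the local store
            return manifest.manifest(self.svfs)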
511 @property
517 @property
512 def manifestlog(self):
518 def manifestlog(self):
513 return manifest.manifestlog(self.svfs, self)
519 return manifest.manifestlog(self.svfs, self)
514
520
515 @repofilecache('dirstate')
521 @repofilecache('dirstate')
516 def dirstate(self):
522 def dirstate(self):
517 return dirstate.dirstate(self.vfs, self.ui, self.root,
523 return dirstate.dirstate(self.vfs, self.ui, self.root,
518 self._dirstatevalidate)
524 self._dirstatevalidate)
519
525
520 def _dirstatevalidate(self, node):
526 def _dirstatevalidate(self, node):
521 try:
527 try:
522 self.changelog.rev(node)
528 self.changelog.rev(node)
523 return node
529 return node
524 except error.LookupError:
530 except error.LookupError:
525 if not self._dirstatevalidatewarned:
531 if not self._dirstatevalidatewarned:
526 self._dirstatevalidatewarned = True
532 self._dirstatevalidatewarned = True
527 self.ui.warn(_("warning: ignoring unknown"
533 self.ui.warn(_("warning: ignoring unknown"
528 " working parent %s!\n") % short(node))
534 " working parent %s!\n") % short(node))
529 return nullid
535 return nullid
530
536
531 def __getitem__(self, changeid):
537 def __getitem__(self, changeid):
532 if changeid is None or changeid == wdirrev:
538 if changeid is None or changeid == wdirrev:
533 return context.workingctx(self)
539 return context.workingctx(self)
534 if isinstance(changeid, slice):
540 if isinstance(changeid, slice):
535 return [context.changectx(self, i)
541 return [context.changectx(self, i)
536 for i in xrange(*changeid.indices(len(self)))
542 for i in xrange(*changeid.indices(len(self)))
537 if i not in self.changelog.filteredrevs]
543 if i not in self.changelog.filteredrevs]
538 return context.changectx(self, changeid)
544 return context.changectx(self, changeid)
539
545
540 def __contains__(self, changeid):
546 def __contains__(self, changeid):
541 try:
547 try:
542 self[changeid]
548 self[changeid]
543 return True
549 return True
544 except error.RepoLookupError:
550 except error.RepoLookupError:
545 return False
551 return False
546
552
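Usage sketch (assuming `repo` is a localrepository and `somenode` a binary changeset node): __getitem__ and __contains__ give dictionary-like access to changesets.

    ctx = repo['tip']       # changectx from a rev number, hex hash, tag, ...
    wctx = repo[None]       # working-directory context
    recent = repo[-3:]      # slicing yields a list of changectx objects
    if somenode in repo:    # membership test via __contains__
        ctx = repo[somenode]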
547 def __nonzero__(self):
553 def __nonzero__(self):
548 return True
554 return True
549
555
550 def __len__(self):
556 def __len__(self):
551 return len(self.changelog)
557 return len(self.changelog)
552
558
553 def __iter__(self):
559 def __iter__(self):
554 return iter(self.changelog)
560 return iter(self.changelog)
555
561
556 def revs(self, expr, *args):
562 def revs(self, expr, *args):
557 '''Find revisions matching a revset.
563 '''Find revisions matching a revset.
558
564
559 The revset is specified as a string ``expr`` that may contain
565 The revset is specified as a string ``expr`` that may contain
560 %-formatting to escape certain types. See ``revset.formatspec``.
566 %-formatting to escape certain types. See ``revset.formatspec``.
561
567
562 Revset aliases from the configuration are not expanded. To expand
568 Revset aliases from the configuration are not expanded. To expand
563 user aliases, consider calling ``scmutil.revrange()``.
569 user aliases, consider calling ``scmutil.revrange()``.
564
570
565 Returns a revset.abstractsmartset, which is a list-like interface
571 Returns a revset.abstractsmartset, which is a list-like interface
566 that contains integer revisions.
572 that contains integer revisions.
567 '''
573 '''
568 expr = revset.formatspec(expr, *args)
574 expr = revset.formatspec(expr, *args)
569 m = revset.match(None, expr)
575 m = revset.match(None, expr)
570 return m(self)
576 return m(self)
571
577
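Usage sketch (assuming `repo` is a localrepository): revs() takes revset %-formatting, so values are escaped for the caller; user-configured revset aliases are not expanded here.

    for rev in repo.revs('ancestors(%d) and not public()', repo['tip'].rev()):
        print(rev)
    drafts = repo.revs('draft()')   # a smartset of integer revisions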
572 def set(self, expr, *args):
578 def set(self, expr, *args):
573 '''Find revisions matching a revset and emit changectx instances.
579 '''Find revisions matching a revset and emit changectx instances.
574
580
575 This is a convenience wrapper around ``revs()`` that iterates the
581 This is a convenience wrapper around ``revs()`` that iterates the
576 result and is a generator of changectx instances.
582 result and is a generator of changectx instances.
577
583
578 Revset aliases from the configuration are not expanded. To expand
584 Revset aliases from the configuration are not expanded. To expand
579 user aliases, consider calling ``scmutil.revrange()``.
585 user aliases, consider calling ``scmutil.revrange()``.
580 '''
586 '''
581 for r in self.revs(expr, *args):
587 for r in self.revs(expr, *args):
582 yield self[r]
588 yield self[r]
583
589
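Usage sketch: set() behaves like revs() but yields changectx objects, which is convenient when the caller needs commit metadata; the branch name below is illustrative.

    for ctx in repo.set('heads(branch(%s))', 'default'):
        print('%s on %s' % (ctx.hex()[:12], ctx.branch()))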
584 def url(self):
590 def url(self):
585 return 'file:' + self.root
591 return 'file:' + self.root
586
592
587 def hook(self, name, throw=False, **args):
593 def hook(self, name, throw=False, **args):
588 """Call a hook, passing this repo instance.
594 """Call a hook, passing this repo instance.
589
595
590 This is a convenience method to aid invoking hooks. Extensions likely
596 This is a convenience method to aid invoking hooks. Extensions likely
591 won't call this unless they have registered a custom hook or are
597 won't call this unless they have registered a custom hook or are
592 replacing code that is expected to call a hook.
598 replacing code that is expected to call a hook.
593 """
599 """
594 return hook.hook(self.ui, self, name, throw, **args)
600 return hook.hook(self.ui, self, name, throw, **args)
595
601
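Usage sketch: an extension can fire a hook of its own through hook(); anything configured under that name in [hooks] will run, and throw=True turns a failing hook into an abort. The hook name below is made up, and `node` is assumed to be a binary node.

    from mercurial.node import hex
    repo.hook('pre-myext-sync', throw=True, node=hex(node))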
596 @unfilteredmethod
602 @unfilteredmethod
597 def _tag(self, names, node, message, local, user, date, extra=None,
603 def _tag(self, names, node, message, local, user, date, extra=None,
598 editor=False):
604 editor=False):
599 if isinstance(names, str):
605 if isinstance(names, str):
600 names = (names,)
606 names = (names,)
601
607
602 branches = self.branchmap()
608 branches = self.branchmap()
603 for name in names:
609 for name in names:
604 self.hook('pretag', throw=True, node=hex(node), tag=name,
610 self.hook('pretag', throw=True, node=hex(node), tag=name,
605 local=local)
611 local=local)
606 if name in branches:
612 if name in branches:
607 self.ui.warn(_("warning: tag %s conflicts with existing"
613 self.ui.warn(_("warning: tag %s conflicts with existing"
608 " branch name\n") % name)
614 " branch name\n") % name)
609
615
610 def writetags(fp, names, munge, prevtags):
616 def writetags(fp, names, munge, prevtags):
611 fp.seek(0, 2)
617 fp.seek(0, 2)
612 if prevtags and prevtags[-1] != '\n':
618 if prevtags and prevtags[-1] != '\n':
613 fp.write('\n')
619 fp.write('\n')
614 for name in names:
620 for name in names:
615 if munge:
621 if munge:
616 m = munge(name)
622 m = munge(name)
617 else:
623 else:
618 m = name
624 m = name
619
625
620 if (self._tagscache.tagtypes and
626 if (self._tagscache.tagtypes and
621 name in self._tagscache.tagtypes):
627 name in self._tagscache.tagtypes):
622 old = self.tags().get(name, nullid)
628 old = self.tags().get(name, nullid)
623 fp.write('%s %s\n' % (hex(old), m))
629 fp.write('%s %s\n' % (hex(old), m))
624 fp.write('%s %s\n' % (hex(node), m))
630 fp.write('%s %s\n' % (hex(node), m))
625 fp.close()
631 fp.close()
626
632
627 prevtags = ''
633 prevtags = ''
628 if local:
634 if local:
629 try:
635 try:
630 fp = self.vfs('localtags', 'r+')
636 fp = self.vfs('localtags', 'r+')
631 except IOError:
637 except IOError:
632 fp = self.vfs('localtags', 'a')
638 fp = self.vfs('localtags', 'a')
633 else:
639 else:
634 prevtags = fp.read()
640 prevtags = fp.read()
635
641
636 # local tags are stored in the current charset
642 # local tags are stored in the current charset
637 writetags(fp, names, None, prevtags)
643 writetags(fp, names, None, prevtags)
638 for name in names:
644 for name in names:
639 self.hook('tag', node=hex(node), tag=name, local=local)
645 self.hook('tag', node=hex(node), tag=name, local=local)
640 return
646 return
641
647
642 try:
648 try:
643 fp = self.wfile('.hgtags', 'rb+')
649 fp = self.wfile('.hgtags', 'rb+')
644 except IOError as e:
650 except IOError as e:
645 if e.errno != errno.ENOENT:
651 if e.errno != errno.ENOENT:
646 raise
652 raise
647 fp = self.wfile('.hgtags', 'ab')
653 fp = self.wfile('.hgtags', 'ab')
648 else:
654 else:
649 prevtags = fp.read()
655 prevtags = fp.read()
650
656
651 # committed tags are stored in UTF-8
657 # committed tags are stored in UTF-8
652 writetags(fp, names, encoding.fromlocal, prevtags)
658 writetags(fp, names, encoding.fromlocal, prevtags)
653
659
654 fp.close()
660 fp.close()
655
661
656 self.invalidatecaches()
662 self.invalidatecaches()
657
663
658 if '.hgtags' not in self.dirstate:
664 if '.hgtags' not in self.dirstate:
659 self[None].add(['.hgtags'])
665 self[None].add(['.hgtags'])
660
666
661 m = matchmod.exact(self.root, '', ['.hgtags'])
667 m = matchmod.exact(self.root, '', ['.hgtags'])
662 tagnode = self.commit(message, user, date, extra=extra, match=m,
668 tagnode = self.commit(message, user, date, extra=extra, match=m,
663 editor=editor)
669 editor=editor)
664
670
665 for name in names:
671 for name in names:
666 self.hook('tag', node=hex(node), tag=name, local=local)
672 self.hook('tag', node=hex(node), tag=name, local=local)
667
673
668 return tagnode
674 return tagnode
669
675
670 def tag(self, names, node, message, local, user, date, editor=False):
676 def tag(self, names, node, message, local, user, date, editor=False):
671 '''tag a revision with one or more symbolic names.
677 '''tag a revision with one or more symbolic names.
672
678
673 names is a list of strings or, when adding a single tag, names may be a
679 names is a list of strings or, when adding a single tag, names may be a
674 string.
680 string.
675
681
676 if local is True, the tags are stored in a per-repository file.
682 if local is True, the tags are stored in a per-repository file.
677 otherwise, they are stored in the .hgtags file, and a new
683 otherwise, they are stored in the .hgtags file, and a new
678 changeset is committed with the change.
684 changeset is committed with the change.
679
685
680 keyword arguments:
686 keyword arguments:
681
687
682 local: whether to store tags in non-version-controlled file
688 local: whether to store tags in non-version-controlled file
683 (default False)
689 (default False)
684
690
685 message: commit message to use if committing
691 message: commit message to use if committing
686
692
687 user: name of user to use if committing
693 user: name of user to use if committing
688
694
689 date: date tuple to use if committing'''
695 date: date tuple to use if committing'''
690
696
691 if not local:
697 if not local:
692 m = matchmod.exact(self.root, '', ['.hgtags'])
698 m = matchmod.exact(self.root, '', ['.hgtags'])
693 if any(self.status(match=m, unknown=True, ignored=True)):
699 if any(self.status(match=m, unknown=True, ignored=True)):
694 raise error.Abort(_('working copy of .hgtags is changed'),
700 raise error.Abort(_('working copy of .hgtags is changed'),
695 hint=_('please commit .hgtags manually'))
701 hint=_('please commit .hgtags manually'))
696
702
697 self.tags() # instantiate the cache
703 self.tags() # instantiate the cache
698 self._tag(names, node, message, local, user, date, editor=editor)
704 self._tag(names, node, message, local, user, date, editor=editor)
699
705
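Usage sketch: tagging the working directory's parent. With local=False this commits a change to .hgtags; callers normally hold the wlock, as the tag command does.

    node = repo['.'].node()
    repo.tag('v1.0', node, 'Added tag v1.0 for changeset %s' % repo['.'].hex()[:12],
             local=False, user=None, date=None)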
700 @filteredpropertycache
706 @filteredpropertycache
701 def _tagscache(self):
707 def _tagscache(self):
702 '''Returns a tagscache object that contains various tags related
708 '''Returns a tagscache object that contains various tags related
703 caches.'''
709 caches.'''
704
710
705 # This simplifies its cache management by having one decorated
711 # This simplifies its cache management by having one decorated
706 # function (this one) and the rest simply fetch things from it.
712 # function (this one) and the rest simply fetch things from it.
707 class tagscache(object):
713 class tagscache(object):
708 def __init__(self):
714 def __init__(self):
709 # These two define the set of tags for this repository. tags
715 # These two define the set of tags for this repository. tags
710 # maps tag name to node; tagtypes maps tag name to 'global' or
716 # maps tag name to node; tagtypes maps tag name to 'global' or
711 # 'local'. (Global tags are defined by .hgtags across all
717 # 'local'. (Global tags are defined by .hgtags across all
712 # heads, and local tags are defined in .hg/localtags.)
718 # heads, and local tags are defined in .hg/localtags.)
713 # They constitute the in-memory cache of tags.
719 # They constitute the in-memory cache of tags.
714 self.tags = self.tagtypes = None
720 self.tags = self.tagtypes = None
715
721
716 self.nodetagscache = self.tagslist = None
722 self.nodetagscache = self.tagslist = None
717
723
718 cache = tagscache()
724 cache = tagscache()
719 cache.tags, cache.tagtypes = self._findtags()
725 cache.tags, cache.tagtypes = self._findtags()
720
726
721 return cache
727 return cache
722
728
723 def tags(self):
729 def tags(self):
724 '''return a mapping of tag to node'''
730 '''return a mapping of tag to node'''
725 t = {}
731 t = {}
726 if self.changelog.filteredrevs:
732 if self.changelog.filteredrevs:
727 tags, tt = self._findtags()
733 tags, tt = self._findtags()
728 else:
734 else:
729 tags = self._tagscache.tags
735 tags = self._tagscache.tags
730 for k, v in tags.iteritems():
736 for k, v in tags.iteritems():
731 try:
737 try:
732 # ignore tags to unknown nodes
738 # ignore tags to unknown nodes
733 self.changelog.rev(v)
739 self.changelog.rev(v)
734 t[k] = v
740 t[k] = v
735 except (error.LookupError, ValueError):
741 except (error.LookupError, ValueError):
736 pass
742 pass
737 return t
743 return t
738
744
739 def _findtags(self):
745 def _findtags(self):
740 '''Do the hard work of finding tags. Return a pair of dicts
746 '''Do the hard work of finding tags. Return a pair of dicts
741 (tags, tagtypes) where tags maps tag name to node, and tagtypes
747 (tags, tagtypes) where tags maps tag name to node, and tagtypes
742 maps tag name to a string like \'global\' or \'local\'.
748 maps tag name to a string like \'global\' or \'local\'.
743 Subclasses or extensions are free to add their own tags, but
749 Subclasses or extensions are free to add their own tags, but
744 should be aware that the returned dicts will be retained for the
750 should be aware that the returned dicts will be retained for the
745 duration of the localrepo object.'''
751 duration of the localrepo object.'''
746
752
747 # XXX what tagtype should subclasses/extensions use? Currently
753 # XXX what tagtype should subclasses/extensions use? Currently
748 # mq and bookmarks add tags, but do not set the tagtype at all.
754 # mq and bookmarks add tags, but do not set the tagtype at all.
749 # Should each extension invent its own tag type? Should there
755 # Should each extension invent its own tag type? Should there
750 # be one tagtype for all such "virtual" tags? Or is the status
756 # be one tagtype for all such "virtual" tags? Or is the status
751 # quo fine?
757 # quo fine?
752
758
753 alltags = {} # map tag name to (node, hist)
759 alltags = {} # map tag name to (node, hist)
754 tagtypes = {}
760 tagtypes = {}
755
761
756 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
762 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
757 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
763 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
758
764
759 # Build the return dicts. Have to re-encode tag names because
765 # Build the return dicts. Have to re-encode tag names because
760 # the tags module always uses UTF-8 (in order not to lose info
766 # the tags module always uses UTF-8 (in order not to lose info
761 # writing to the cache), but the rest of Mercurial wants them in
767 # writing to the cache), but the rest of Mercurial wants them in
762 # local encoding.
768 # local encoding.
763 tags = {}
769 tags = {}
764 for (name, (node, hist)) in alltags.iteritems():
770 for (name, (node, hist)) in alltags.iteritems():
765 if node != nullid:
771 if node != nullid:
766 tags[encoding.tolocal(name)] = node
772 tags[encoding.tolocal(name)] = node
767 tags['tip'] = self.changelog.tip()
773 tags['tip'] = self.changelog.tip()
768 tagtypes = dict([(encoding.tolocal(name), value)
774 tagtypes = dict([(encoding.tolocal(name), value)
769 for (name, value) in tagtypes.iteritems()])
775 for (name, value) in tagtypes.iteritems()])
770 return (tags, tagtypes)
776 return (tags, tagtypes)
771
777
772 def tagtype(self, tagname):
778 def tagtype(self, tagname):
773 '''
779 '''
774 return the type of the given tag. result can be:
780 return the type of the given tag. result can be:
775
781
776 'local' : a local tag
782 'local' : a local tag
777 'global' : a global tag
783 'global' : a global tag
778 None : tag does not exist
784 None : tag does not exist
779 '''
785 '''
780
786
781 return self._tagscache.tagtypes.get(tagname)
787 return self._tagscache.tagtypes.get(tagname)
782
788
783 def tagslist(self):
789 def tagslist(self):
784 '''return a list of tags ordered by revision'''
790 '''return a list of tags ordered by revision'''
785 if not self._tagscache.tagslist:
791 if not self._tagscache.tagslist:
786 l = []
792 l = []
787 for t, n in self.tags().iteritems():
793 for t, n in self.tags().iteritems():
788 l.append((self.changelog.rev(n), t, n))
794 l.append((self.changelog.rev(n), t, n))
789 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
795 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
790
796
791 return self._tagscache.tagslist
797 return self._tagscache.tagslist
792
798
793 def nodetags(self, node):
799 def nodetags(self, node):
794 '''return the tags associated with a node'''
800 '''return the tags associated with a node'''
795 if not self._tagscache.nodetagscache:
801 if not self._tagscache.nodetagscache:
796 nodetagscache = {}
802 nodetagscache = {}
797 for t, n in self._tagscache.tags.iteritems():
803 for t, n in self._tagscache.tags.iteritems():
798 nodetagscache.setdefault(n, []).append(t)
804 nodetagscache.setdefault(n, []).append(t)
799 for tags in nodetagscache.itervalues():
805 for tags in nodetagscache.itervalues():
800 tags.sort()
806 tags.sort()
801 self._tagscache.nodetagscache = nodetagscache
807 self._tagscache.nodetagscache = nodetagscache
802 return self._tagscache.nodetagscache.get(node, [])
808 return self._tagscache.nodetagscache.get(node, [])
803
809
804 def nodebookmarks(self, node):
810 def nodebookmarks(self, node):
805 """return the list of bookmarks pointing to the specified node"""
811 """return the list of bookmarks pointing to the specified node"""
806 marks = []
812 marks = []
807 for bookmark, n in self._bookmarks.iteritems():
813 for bookmark, n in self._bookmarks.iteritems():
808 if n == node:
814 if n == node:
809 marks.append(bookmark)
815 marks.append(bookmark)
810 return sorted(marks)
816 return sorted(marks)
811
817
812 def branchmap(self):
818 def branchmap(self):
813 '''returns a dictionary {branch: [branchheads]} with branchheads
819 '''returns a dictionary {branch: [branchheads]} with branchheads
814 ordered by increasing revision number'''
820 ordered by increasing revision number'''
815 branchmap.updatecache(self)
821 branchmap.updatecache(self)
816 return self._branchcaches[self.filtername]
822 return self._branchcaches[self.filtername]
817
823
818 @unfilteredmethod
824 @unfilteredmethod
819 def revbranchcache(self):
825 def revbranchcache(self):
820 if not self._revbranchcache:
826 if not self._revbranchcache:
821 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
827 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
822 return self._revbranchcache
828 return self._revbranchcache
823
829
824 def branchtip(self, branch, ignoremissing=False):
830 def branchtip(self, branch, ignoremissing=False):
825 '''return the tip node for a given branch
831 '''return the tip node for a given branch
826
832
827 If ignoremissing is True, then this method will not raise an error.
833 If ignoremissing is True, then this method will not raise an error.
828 This is helpful for callers that only expect None for a missing branch
834 This is helpful for callers that only expect None for a missing branch
829 (e.g. namespace).
835 (e.g. namespace).
830
836
831 '''
837 '''
832 try:
838 try:
833 return self.branchmap().branchtip(branch)
839 return self.branchmap().branchtip(branch)
834 except KeyError:
840 except KeyError:
835 if not ignoremissing:
841 if not ignoremissing:
836 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
842 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
837 else:
843 else:
838 pass
844 pass
839
845
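Usage sketch: with ignoremissing=True an unknown branch yields None instead of raising RepoLookupError; the branch names below are illustrative.

    tip = repo.branchtip('default')                            # raises if unknown
    maybe = repo.branchtip('no-such-branch', ignoremissing=True)   # None if unknown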
840 def lookup(self, key):
846 def lookup(self, key):
841 return self[key].node()
847 return self[key].node()
842
848
843 def lookupbranch(self, key, remote=None):
849 def lookupbranch(self, key, remote=None):
844 repo = remote or self
850 repo = remote or self
845 if key in repo.branchmap():
851 if key in repo.branchmap():
846 return key
852 return key
847
853
848 repo = (remote and remote.local()) and remote or self
854 repo = (remote and remote.local()) and remote or self
849 return repo[key].branch()
855 return repo[key].branch()
850
856
851 def known(self, nodes):
857 def known(self, nodes):
852 cl = self.changelog
858 cl = self.changelog
853 nm = cl.nodemap
859 nm = cl.nodemap
854 filtered = cl.filteredrevs
860 filtered = cl.filteredrevs
855 result = []
861 result = []
856 for n in nodes:
862 for n in nodes:
857 r = nm.get(n)
863 r = nm.get(n)
858 resp = not (r is None or r in filtered)
864 resp = not (r is None or r in filtered)
859 result.append(resp)
865 result.append(resp)
860 return result
866 return result
861
867
862 def local(self):
868 def local(self):
863 return self
869 return self
864
870
865 def publishing(self):
871 def publishing(self):
866 # it's safe (and desirable) to trust the publish flag unconditionally
872 # it's safe (and desirable) to trust the publish flag unconditionally
867 # so that we don't finalize changes shared between users via ssh or nfs
873 # so that we don't finalize changes shared between users via ssh or nfs
868 return self.ui.configbool('phases', 'publish', True, untrusted=True)
874 return self.ui.configbool('phases', 'publish', True, untrusted=True)
869
875
870 def cancopy(self):
876 def cancopy(self):
871 # so statichttprepo's override of local() works
877 # so statichttprepo's override of local() works
872 if not self.local():
878 if not self.local():
873 return False
879 return False
874 if not self.publishing():
880 if not self.publishing():
875 return True
881 return True
876 # if publishing we can't copy if there is filtered content
882 # if publishing we can't copy if there is filtered content
877 return not self.filtered('visible').changelog.filteredrevs
883 return not self.filtered('visible').changelog.filteredrevs
878
884
879 def shared(self):
885 def shared(self):
880 '''the type of shared repository (None if not shared)'''
886 '''the type of shared repository (None if not shared)'''
881 if self.sharedpath != self.path:
887 if self.sharedpath != self.path:
882 return 'store'
888 return 'store'
883 return None
889 return None
884
890
885 def join(self, f, *insidef):
891 def join(self, f, *insidef):
886 return self.vfs.join(os.path.join(f, *insidef))
892 return self.vfs.join(os.path.join(f, *insidef))
887
893
888 def wjoin(self, f, *insidef):
894 def wjoin(self, f, *insidef):
889 return self.vfs.reljoin(self.root, f, *insidef)
895 return self.vfs.reljoin(self.root, f, *insidef)
890
896
891 def file(self, f):
897 def file(self, f):
892 if f[0] == '/':
898 if f[0] == '/':
893 f = f[1:]
899 f = f[1:]
894 return filelog.filelog(self.svfs, f)
900 return filelog.filelog(self.svfs, f)
895
901
896 def changectx(self, changeid):
902 def changectx(self, changeid):
897 return self[changeid]
903 return self[changeid]
898
904
899 def setparents(self, p1, p2=nullid):
905 def setparents(self, p1, p2=nullid):
900 self.dirstate.beginparentchange()
906 self.dirstate.beginparentchange()
901 copies = self.dirstate.setparents(p1, p2)
907 copies = self.dirstate.setparents(p1, p2)
902 pctx = self[p1]
908 pctx = self[p1]
903 if copies:
909 if copies:
904 # Adjust copy records: the dirstate cannot do it, as it
910 # Adjust copy records: the dirstate cannot do it, as it
905 # requires access to the parents' manifests. Preserve them
911 # requires access to the parents' manifests. Preserve them
906 # only for entries added to the first parent.
912 # only for entries added to the first parent.
907 for f in copies:
913 for f in copies:
908 if f not in pctx and copies[f] in pctx:
914 if f not in pctx and copies[f] in pctx:
909 self.dirstate.copy(copies[f], f)
915 self.dirstate.copy(copies[f], f)
910 if p2 == nullid:
916 if p2 == nullid:
911 for f, s in sorted(self.dirstate.copies().items()):
917 for f, s in sorted(self.dirstate.copies().items()):
912 if f not in pctx and s not in pctx:
918 if f not in pctx and s not in pctx:
913 self.dirstate.copy(None, f)
919 self.dirstate.copy(None, f)
914 self.dirstate.endparentchange()
920 self.dirstate.endparentchange()
915
921
916 def filectx(self, path, changeid=None, fileid=None):
922 def filectx(self, path, changeid=None, fileid=None):
917 """changeid can be a changeset revision, node, or tag.
923 """changeid can be a changeset revision, node, or tag.
918 fileid can be a file revision or node."""
924 fileid can be a file revision or node."""
919 return context.filectx(self, path, changeid, fileid)
925 return context.filectx(self, path, changeid, fileid)
920
926
921 def getcwd(self):
927 def getcwd(self):
922 return self.dirstate.getcwd()
928 return self.dirstate.getcwd()
923
929
924 def pathto(self, f, cwd=None):
930 def pathto(self, f, cwd=None):
925 return self.dirstate.pathto(f, cwd)
931 return self.dirstate.pathto(f, cwd)
926
932
927 def wfile(self, f, mode='r'):
933 def wfile(self, f, mode='r'):
928 return self.wvfs(f, mode)
934 return self.wvfs(f, mode)
929
935
930 def _link(self, f):
936 def _link(self, f):
931 return self.wvfs.islink(f)
937 return self.wvfs.islink(f)
932
938
933 def _loadfilter(self, filter):
939 def _loadfilter(self, filter):
934 if filter not in self.filterpats:
940 if filter not in self.filterpats:
935 l = []
941 l = []
936 for pat, cmd in self.ui.configitems(filter):
942 for pat, cmd in self.ui.configitems(filter):
937 if cmd == '!':
943 if cmd == '!':
938 continue
944 continue
939 mf = matchmod.match(self.root, '', [pat])
945 mf = matchmod.match(self.root, '', [pat])
940 fn = None
946 fn = None
941 params = cmd
947 params = cmd
942 for name, filterfn in self._datafilters.iteritems():
948 for name, filterfn in self._datafilters.iteritems():
943 if cmd.startswith(name):
949 if cmd.startswith(name):
944 fn = filterfn
950 fn = filterfn
945 params = cmd[len(name):].lstrip()
951 params = cmd[len(name):].lstrip()
946 break
952 break
947 if not fn:
953 if not fn:
948 fn = lambda s, c, **kwargs: util.filter(s, c)
954 fn = lambda s, c, **kwargs: util.filter(s, c)
949 # Wrap old filters not supporting keyword arguments
955 # Wrap old filters not supporting keyword arguments
950 if not inspect.getargspec(fn)[2]:
956 if not inspect.getargspec(fn)[2]:
951 oldfn = fn
957 oldfn = fn
952 fn = lambda s, c, **kwargs: oldfn(s, c)
958 fn = lambda s, c, **kwargs: oldfn(s, c)
953 l.append((mf, fn, params))
959 l.append((mf, fn, params))
954 self.filterpats[filter] = l
960 self.filterpats[filter] = l
955 return self.filterpats[filter]
961 return self.filterpats[filter]
956
962
957 def _filter(self, filterpats, filename, data):
963 def _filter(self, filterpats, filename, data):
958 for mf, fn, cmd in filterpats:
964 for mf, fn, cmd in filterpats:
959 if mf(filename):
965 if mf(filename):
960 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
966 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
961 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
967 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
962 break
968 break
963
969
964 return data
970 return data
965
971
966 @unfilteredpropertycache
972 @unfilteredpropertycache
967 def _encodefilterpats(self):
973 def _encodefilterpats(self):
968 return self._loadfilter('encode')
974 return self._loadfilter('encode')
969
975
970 @unfilteredpropertycache
976 @unfilteredpropertycache
971 def _decodefilterpats(self):
977 def _decodefilterpats(self):
972 return self._loadfilter('decode')
978 return self._loadfilter('decode')
973
979
974 def adddatafilter(self, name, filter):
980 def adddatafilter(self, name, filter):
975 self._datafilters[name] = filter
981 self._datafilters[name] = filter
976
982
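Usage sketch: an extension can register a named data filter and reference it from [encode]/[decode] configuration (the eol extension registers its filters this way); `tolower` and the pattern below are made-up examples.

    def tolower(s, params, **kwargs):
        # receives the file data and the remainder of the config value
        return s.lower()
    repo.adddatafilter('tolower:', tolower)
    # matching hgrc entry:
    #   [decode]
    #   **.txt = tolower: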
977 def wread(self, filename):
983 def wread(self, filename):
978 if self._link(filename):
984 if self._link(filename):
979 data = self.wvfs.readlink(filename)
985 data = self.wvfs.readlink(filename)
980 else:
986 else:
981 data = self.wvfs.read(filename)
987 data = self.wvfs.read(filename)
982 return self._filter(self._encodefilterpats, filename, data)
988 return self._filter(self._encodefilterpats, filename, data)
983
989
984 def wwrite(self, filename, data, flags, backgroundclose=False):
990 def wwrite(self, filename, data, flags, backgroundclose=False):
985 """write ``data`` into ``filename`` in the working directory
991 """write ``data`` into ``filename`` in the working directory
986
992
987 This returns the length of the written (possibly decoded) data.
993 This returns the length of the written (possibly decoded) data.
988 """
994 """
989 data = self._filter(self._decodefilterpats, filename, data)
995 data = self._filter(self._decodefilterpats, filename, data)
990 if 'l' in flags:
996 if 'l' in flags:
991 self.wvfs.symlink(data, filename)
997 self.wvfs.symlink(data, filename)
992 else:
998 else:
993 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
999 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
994 if 'x' in flags:
1000 if 'x' in flags:
995 self.wvfs.setflags(filename, False, True)
1001 self.wvfs.setflags(filename, False, True)
996 return len(data)
1002 return len(data)
997
1003
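Usage sketch: wwrite() runs the [decode] filters and applies flags ('l' symlink, 'x' executable) when materialising a file in the working directory; the filenames are illustrative.

    n = repo.wwrite('hello.txt', 'hello\n', flags='')
    repo.wwrite('script.sh', '#!/bin/sh\n', flags='x')   # also sets the exec bit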
998 def wwritedata(self, filename, data):
1004 def wwritedata(self, filename, data):
999 return self._filter(self._decodefilterpats, filename, data)
1005 return self._filter(self._decodefilterpats, filename, data)
1000
1006
1001 def currenttransaction(self):
1007 def currenttransaction(self):
1002 """return the current transaction or None if none exists"""
1008 """return the current transaction or None if none exists"""
1003 if self._transref:
1009 if self._transref:
1004 tr = self._transref()
1010 tr = self._transref()
1005 else:
1011 else:
1006 tr = None
1012 tr = None
1007
1013
1008 if tr and tr.running():
1014 if tr and tr.running():
1009 return tr
1015 return tr
1010 return None
1016 return None
1011
1017
1012 def transaction(self, desc, report=None):
1018 def transaction(self, desc, report=None):
1013 if (self.ui.configbool('devel', 'all-warnings')
1019 if (self.ui.configbool('devel', 'all-warnings')
1014 or self.ui.configbool('devel', 'check-locks')):
1020 or self.ui.configbool('devel', 'check-locks')):
1015 if self._currentlock(self._lockref) is None:
1021 if self._currentlock(self._lockref) is None:
1016 raise RuntimeError('programming error: transaction requires '
1022 raise RuntimeError('programming error: transaction requires '
1017 'locking')
1023 'locking')
1018 tr = self.currenttransaction()
1024 tr = self.currenttransaction()
1019 if tr is not None:
1025 if tr is not None:
1020 return tr.nest()
1026 return tr.nest()
1021
1027
1022 # abort here if the journal already exists
1028 # abort here if the journal already exists
1023 if self.svfs.exists("journal"):
1029 if self.svfs.exists("journal"):
1024 raise error.RepoError(
1030 raise error.RepoError(
1025 _("abandoned transaction found"),
1031 _("abandoned transaction found"),
1026 hint=_("run 'hg recover' to clean up transaction"))
1032 hint=_("run 'hg recover' to clean up transaction"))
1027
1033
1028 idbase = "%.40f#%f" % (random.random(), time.time())
1034 idbase = "%.40f#%f" % (random.random(), time.time())
1029 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1035 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1030 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1036 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1031
1037
1032 self._writejournal(desc)
1038 self._writejournal(desc)
1033 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1039 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1034 if report:
1040 if report:
1035 rp = report
1041 rp = report
1036 else:
1042 else:
1037 rp = self.ui.warn
1043 rp = self.ui.warn
1038 vfsmap = {'plain': self.vfs} # root of .hg/
1044 vfsmap = {'plain': self.vfs} # root of .hg/
1039 # we must avoid cyclic reference between repo and transaction.
1045 # we must avoid cyclic reference between repo and transaction.
1040 reporef = weakref.ref(self)
1046 reporef = weakref.ref(self)
1041 def validate(tr):
1047 def validate(tr):
1042 """will run pre-closing hooks"""
1048 """will run pre-closing hooks"""
1043 reporef().hook('pretxnclose', throw=True,
1049 reporef().hook('pretxnclose', throw=True,
1044 txnname=desc, **tr.hookargs)
1050 txnname=desc, **tr.hookargs)
1045 def releasefn(tr, success):
1051 def releasefn(tr, success):
1046 repo = reporef()
1052 repo = reporef()
1047 if success:
1053 if success:
1048 # this should be explicitly invoked here, because
1054 # this should be explicitly invoked here, because
1049 # in-memory changes aren't written out when the
1055 # in-memory changes aren't written out when the
1050 # transaction closes, if tr.addfilegenerator (via
1056 # transaction closes, if tr.addfilegenerator (via
1051 # dirstate.write or so) isn't invoked while the
1057 # dirstate.write or so) isn't invoked while the
1052 # transaction is running
1058 # transaction is running
1053 repo.dirstate.write(None)
1059 repo.dirstate.write(None)
1054 else:
1060 else:
1055 # discard all changes (including ones already written
1061 # discard all changes (including ones already written
1056 # out) in this transaction
1062 # out) in this transaction
1057 repo.dirstate.restorebackup(None, prefix='journal.')
1063 repo.dirstate.restorebackup(None, prefix='journal.')
1058
1064
1059 repo.invalidate(clearfilecache=True)
1065 repo.invalidate(clearfilecache=True)
1060
1066
1061 tr = transaction.transaction(rp, self.svfs, vfsmap,
1067 tr = transaction.transaction(rp, self.svfs, vfsmap,
1062 "journal",
1068 "journal",
1063 "undo",
1069 "undo",
1064 aftertrans(renames),
1070 aftertrans(renames),
1065 self.store.createmode,
1071 self.store.createmode,
1066 validator=validate,
1072 validator=validate,
1067 releasefn=releasefn)
1073 releasefn=releasefn)
1068
1074
1069 tr.hookargs['txnid'] = txnid
1075 tr.hookargs['txnid'] = txnid
1070 # note: writing the fncache only during finalize means that the file is
1076 # note: writing the fncache only during finalize means that the file is
1071 # outdated when running hooks. As fncache is used for streaming clone,
1077 # outdated when running hooks. As fncache is used for streaming clone,
1072 # this is not expected to break anything that happens during the hooks.
1078 # this is not expected to break anything that happens during the hooks.
1073 tr.addfinalize('flush-fncache', self.store.write)
1079 tr.addfinalize('flush-fncache', self.store.write)
1074 def txnclosehook(tr2):
1080 def txnclosehook(tr2):
1075 """To be run if transaction is successful, will schedule a hook run
1081 """To be run if transaction is successful, will schedule a hook run
1076 """
1082 """
1077 # Don't reference tr2 in hook() so we don't hold a reference.
1083 # Don't reference tr2 in hook() so we don't hold a reference.
1078 # This reduces memory consumption when there are multiple
1084 # This reduces memory consumption when there are multiple
1079 # transactions per lock. This can likely go away if issue5045
1085 # transactions per lock. This can likely go away if issue5045
1080 # fixes the function accumulation.
1086 # fixes the function accumulation.
1081 hookargs = tr2.hookargs
1087 hookargs = tr2.hookargs
1082
1088
1083 def hook():
1089 def hook():
1084 reporef().hook('txnclose', throw=False, txnname=desc,
1090 reporef().hook('txnclose', throw=False, txnname=desc,
1085 **hookargs)
1091 **hookargs)
1086 reporef()._afterlock(hook)
1092 reporef()._afterlock(hook)
1087 tr.addfinalize('txnclose-hook', txnclosehook)
1093 tr.addfinalize('txnclose-hook', txnclosehook)
1088 def txnaborthook(tr2):
1094 def txnaborthook(tr2):
1089 """To be run if transaction is aborted
1095 """To be run if transaction is aborted
1090 """
1096 """
1091 reporef().hook('txnabort', throw=False, txnname=desc,
1097 reporef().hook('txnabort', throw=False, txnname=desc,
1092 **tr2.hookargs)
1098 **tr2.hookargs)
1093 tr.addabort('txnabort-hook', txnaborthook)
1099 tr.addabort('txnabort-hook', txnaborthook)
1094 # avoid eager cache invalidation. in-memory data should be identical
1100 # avoid eager cache invalidation. in-memory data should be identical
1095 # to stored data if transaction has no error.
1101 # to stored data if transaction has no error.
1096 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1102 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1097 self._transref = weakref.ref(tr)
1103 self._transref = weakref.ref(tr)
1098 return tr
1104 return tr
1099
1105
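Usage sketch: writers take the store lock first and then open a transaction; a nested transaction() call returns tr.nest(), and tr.release() rolls back if close() was never reached.

    with repo.lock():
        tr = repo.transaction('my-operation')
        try:
            # ... append to revlogs, move phases, etc. ...
            tr.close()
        finally:
            tr.release()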
1100 def _journalfiles(self):
1106 def _journalfiles(self):
1101 return ((self.svfs, 'journal'),
1107 return ((self.svfs, 'journal'),
1102 (self.vfs, 'journal.dirstate'),
1108 (self.vfs, 'journal.dirstate'),
1103 (self.vfs, 'journal.branch'),
1109 (self.vfs, 'journal.branch'),
1104 (self.vfs, 'journal.desc'),
1110 (self.vfs, 'journal.desc'),
1105 (self.vfs, 'journal.bookmarks'),
1111 (self.vfs, 'journal.bookmarks'),
1106 (self.svfs, 'journal.phaseroots'))
1112 (self.svfs, 'journal.phaseroots'))
1107
1113
1108 def undofiles(self):
1114 def undofiles(self):
1109 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1115 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1110
1116
1111 def _writejournal(self, desc):
1117 def _writejournal(self, desc):
1112 self.dirstate.savebackup(None, prefix='journal.')
1118 self.dirstate.savebackup(None, prefix='journal.')
1113 self.vfs.write("journal.branch",
1119 self.vfs.write("journal.branch",
1114 encoding.fromlocal(self.dirstate.branch()))
1120 encoding.fromlocal(self.dirstate.branch()))
1115 self.vfs.write("journal.desc",
1121 self.vfs.write("journal.desc",
1116 "%d\n%s\n" % (len(self), desc))
1122 "%d\n%s\n" % (len(self), desc))
1117 self.vfs.write("journal.bookmarks",
1123 self.vfs.write("journal.bookmarks",
1118 self.vfs.tryread("bookmarks"))
1124 self.vfs.tryread("bookmarks"))
1119 self.svfs.write("journal.phaseroots",
1125 self.svfs.write("journal.phaseroots",
1120 self.svfs.tryread("phaseroots"))
1126 self.svfs.tryread("phaseroots"))
1121
1127
1122 def recover(self):
1128 def recover(self):
1123 with self.lock():
1129 with self.lock():
1124 if self.svfs.exists("journal"):
1130 if self.svfs.exists("journal"):
1125 self.ui.status(_("rolling back interrupted transaction\n"))
1131 self.ui.status(_("rolling back interrupted transaction\n"))
1126 vfsmap = {'': self.svfs,
1132 vfsmap = {'': self.svfs,
1127 'plain': self.vfs,}
1133 'plain': self.vfs,}
1128 transaction.rollback(self.svfs, vfsmap, "journal",
1134 transaction.rollback(self.svfs, vfsmap, "journal",
1129 self.ui.warn)
1135 self.ui.warn)
1130 self.invalidate()
1136 self.invalidate()
1131 return True
1137 return True
1132 else:
1138 else:
1133 self.ui.warn(_("no interrupted transaction available\n"))
1139 self.ui.warn(_("no interrupted transaction available\n"))
1134 return False
1140 return False
1135
1141
1136 def rollback(self, dryrun=False, force=False):
1142 def rollback(self, dryrun=False, force=False):
1137 wlock = lock = dsguard = None
1143 wlock = lock = dsguard = None
1138 try:
1144 try:
1139 wlock = self.wlock()
1145 wlock = self.wlock()
1140 lock = self.lock()
1146 lock = self.lock()
1141 if self.svfs.exists("undo"):
1147 if self.svfs.exists("undo"):
1142 dsguard = cmdutil.dirstateguard(self, 'rollback')
1148 dsguard = cmdutil.dirstateguard(self, 'rollback')
1143
1149
1144 return self._rollback(dryrun, force, dsguard)
1150 return self._rollback(dryrun, force, dsguard)
1145 else:
1151 else:
1146 self.ui.warn(_("no rollback information available\n"))
1152 self.ui.warn(_("no rollback information available\n"))
1147 return 1
1153 return 1
1148 finally:
1154 finally:
1149 release(dsguard, lock, wlock)
1155 release(dsguard, lock, wlock)
1150
1156
1151 @unfilteredmethod # Until we get smarter cache management
1157 @unfilteredmethod # Until we get smarter cache management
1152 def _rollback(self, dryrun, force, dsguard):
1158 def _rollback(self, dryrun, force, dsguard):
1153 ui = self.ui
1159 ui = self.ui
1154 try:
1160 try:
1155 args = self.vfs.read('undo.desc').splitlines()
1161 args = self.vfs.read('undo.desc').splitlines()
1156 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1162 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1157 if len(args) >= 3:
1163 if len(args) >= 3:
1158 detail = args[2]
1164 detail = args[2]
1159 oldtip = oldlen - 1
1165 oldtip = oldlen - 1
1160
1166
1161 if detail and ui.verbose:
1167 if detail and ui.verbose:
1162 msg = (_('repository tip rolled back to revision %s'
1168 msg = (_('repository tip rolled back to revision %s'
1163 ' (undo %s: %s)\n')
1169 ' (undo %s: %s)\n')
1164 % (oldtip, desc, detail))
1170 % (oldtip, desc, detail))
1165 else:
1171 else:
1166 msg = (_('repository tip rolled back to revision %s'
1172 msg = (_('repository tip rolled back to revision %s'
1167 ' (undo %s)\n')
1173 ' (undo %s)\n')
1168 % (oldtip, desc))
1174 % (oldtip, desc))
1169 except IOError:
1175 except IOError:
1170 msg = _('rolling back unknown transaction\n')
1176 msg = _('rolling back unknown transaction\n')
1171 desc = None
1177 desc = None
1172
1178
1173 if not force and self['.'] != self['tip'] and desc == 'commit':
1179 if not force and self['.'] != self['tip'] and desc == 'commit':
1174 raise error.Abort(
1180 raise error.Abort(
1175 _('rollback of last commit while not checked out '
1181 _('rollback of last commit while not checked out '
1176 'may lose data'), hint=_('use -f to force'))
1182 'may lose data'), hint=_('use -f to force'))
1177
1183
1178 ui.status(msg)
1184 ui.status(msg)
1179 if dryrun:
1185 if dryrun:
1180 return 0
1186 return 0
1181
1187
1182 parents = self.dirstate.parents()
1188 parents = self.dirstate.parents()
1183 self.destroying()
1189 self.destroying()
1184 vfsmap = {'plain': self.vfs, '': self.svfs}
1190 vfsmap = {'plain': self.vfs, '': self.svfs}
1185 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1191 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1186 if self.vfs.exists('undo.bookmarks'):
1192 if self.vfs.exists('undo.bookmarks'):
1187 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1193 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1188 if self.svfs.exists('undo.phaseroots'):
1194 if self.svfs.exists('undo.phaseroots'):
1189 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1195 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1190 self.invalidate()
1196 self.invalidate()
1191
1197
1192 parentgone = (parents[0] not in self.changelog.nodemap or
1198 parentgone = (parents[0] not in self.changelog.nodemap or
1193 parents[1] not in self.changelog.nodemap)
1199 parents[1] not in self.changelog.nodemap)
1194 if parentgone:
1200 if parentgone:
1195 # prevent dirstateguard from overwriting already restored one
1201 # prevent dirstateguard from overwriting already restored one
1196 dsguard.close()
1202 dsguard.close()
1197
1203
1198 self.dirstate.restorebackup(None, prefix='undo.')
1204 self.dirstate.restorebackup(None, prefix='undo.')
1199 try:
1205 try:
1200 branch = self.vfs.read('undo.branch')
1206 branch = self.vfs.read('undo.branch')
1201 self.dirstate.setbranch(encoding.tolocal(branch))
1207 self.dirstate.setbranch(encoding.tolocal(branch))
1202 except IOError:
1208 except IOError:
1203 ui.warn(_('named branch could not be reset: '
1209 ui.warn(_('named branch could not be reset: '
1204 'current branch is still \'%s\'\n')
1210 'current branch is still \'%s\'\n')
1205 % self.dirstate.branch())
1211 % self.dirstate.branch())
1206
1212
1207 parents = tuple([p.rev() for p in self[None].parents()])
1213 parents = tuple([p.rev() for p in self[None].parents()])
1208 if len(parents) > 1:
1214 if len(parents) > 1:
1209 ui.status(_('working directory now based on '
1215 ui.status(_('working directory now based on '
1210 'revisions %d and %d\n') % parents)
1216 'revisions %d and %d\n') % parents)
1211 else:
1217 else:
1212 ui.status(_('working directory now based on '
1218 ui.status(_('working directory now based on '
1213 'revision %d\n') % parents)
1219 'revision %d\n') % parents)
1214 mergemod.mergestate.clean(self, self['.'].node())
1220 mergemod.mergestate.clean(self, self['.'].node())
1215
1221
1216 # TODO: if we know which new heads may result from this rollback, pass
1222 # TODO: if we know which new heads may result from this rollback, pass
1217 # them to destroy(), which will prevent the branchhead cache from being
1223 # them to destroy(), which will prevent the branchhead cache from being
1218 # invalidated.
1224 # invalidated.
1219 self.destroyed()
1225 self.destroyed()
1220 return 0
1226 return 0
1221
1227
1222 def invalidatecaches(self):
1228 def invalidatecaches(self):
1223
1229
1224 if '_tagscache' in vars(self):
1230 if '_tagscache' in vars(self):
1225 # can't use delattr on proxy
1231 # can't use delattr on proxy
1226 del self.__dict__['_tagscache']
1232 del self.__dict__['_tagscache']
1227
1233
1228 self.unfiltered()._branchcaches.clear()
1234 self.unfiltered()._branchcaches.clear()
1229 self.invalidatevolatilesets()
1235 self.invalidatevolatilesets()
1230
1236
1231 def invalidatevolatilesets(self):
1237 def invalidatevolatilesets(self):
1232 self.filteredrevcache.clear()
1238 self.filteredrevcache.clear()
1233 obsolete.clearobscaches(self)
1239 obsolete.clearobscaches(self)
1234
1240
1235 def invalidatedirstate(self):
1241 def invalidatedirstate(self):
1236 '''Invalidates the dirstate, causing the next call to dirstate
1242 '''Invalidates the dirstate, causing the next call to dirstate
1237 to check if it was modified since the last time it was read,
1243 to check if it was modified since the last time it was read,
1238 rereading it if it has.
1244 rereading it if it has.
1239
1245
1240 This is different from dirstate.invalidate() in that it doesn't always
1246 This is different from dirstate.invalidate() in that it doesn't always
1241 reread the dirstate. Use dirstate.invalidate() if you want to
1247 reread the dirstate. Use dirstate.invalidate() if you want to
1242 explicitly read the dirstate again (i.e. restoring it to a previous
1248 explicitly read the dirstate again (i.e. restoring it to a previous
1243 known good state).'''
1249 known good state).'''
1244 if hasunfilteredcache(self, 'dirstate'):
1250 if hasunfilteredcache(self, 'dirstate'):
1245 for k in self.dirstate._filecache:
1251 for k in self.dirstate._filecache:
1246 try:
1252 try:
1247 delattr(self.dirstate, k)
1253 delattr(self.dirstate, k)
1248 except AttributeError:
1254 except AttributeError:
1249 pass
1255 pass
1250 delattr(self.unfiltered(), 'dirstate')
1256 delattr(self.unfiltered(), 'dirstate')
1251
1257
1252 def invalidate(self, clearfilecache=False):
1258 def invalidate(self, clearfilecache=False):
1253 '''Invalidates both store and non-store parts other than dirstate
1259 '''Invalidates both store and non-store parts other than dirstate
1254
1260
1255 If a transaction is running, invalidation of store is omitted,
1261 If a transaction is running, invalidation of store is omitted,
1256 because discarding in-memory changes might cause inconsistency
1262 because discarding in-memory changes might cause inconsistency
1257 (e.g. an incomplete fncache causes unintentional failures, but
1263 (e.g. an incomplete fncache causes unintentional failures, but
1258 a redundant one doesn't).
1264 a redundant one doesn't).
1259 '''
1265 '''
1260 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1266 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1261 for k in self._filecache.keys():
1267 for k in self._filecache.keys():
1262 # dirstate is invalidated separately in invalidatedirstate()
1268 # dirstate is invalidated separately in invalidatedirstate()
1263 if k == 'dirstate':
1269 if k == 'dirstate':
1264 continue
1270 continue
1265
1271
1266 if clearfilecache:
1272 if clearfilecache:
1267 del self._filecache[k]
1273 del self._filecache[k]
1268 try:
1274 try:
1269 delattr(unfiltered, k)
1275 delattr(unfiltered, k)
1270 except AttributeError:
1276 except AttributeError:
1271 pass
1277 pass
1272 self.invalidatecaches()
1278 self.invalidatecaches()
1273 if not self.currenttransaction():
1279 if not self.currenttransaction():
1274 # TODO: Changing contents of store outside transaction
1280 # TODO: Changing contents of store outside transaction
1275 # causes inconsistency. We should make in-memory store
1281 # causes inconsistency. We should make in-memory store
1276 # changes detectable, and abort if changed.
1282 # changes detectable, and abort if changed.
1277 self.store.invalidatecaches()
1283 self.store.invalidatecaches()
1278
1284
1279 def invalidateall(self):
1285 def invalidateall(self):
1280 '''Fully invalidates both store and non-store parts, causing the
1286 '''Fully invalidates both store and non-store parts, causing the
1281 subsequent operation to reread any outside changes.'''
1287 subsequent operation to reread any outside changes.'''
1282 # extension should hook this to invalidate its caches
1288 # extension should hook this to invalidate its caches
1283 self.invalidate()
1289 self.invalidate()
1284 self.invalidatedirstate()
1290 self.invalidatedirstate()
1285
1291
1286 @unfilteredmethod
1292 @unfilteredmethod
1287 def _refreshfilecachestats(self, tr):
1293 def _refreshfilecachestats(self, tr):
1288 """Reload stats of cached files so that they are flagged as valid"""
1294 """Reload stats of cached files so that they are flagged as valid"""
1289 for k, ce in self._filecache.items():
1295 for k, ce in self._filecache.items():
1290 if k == 'dirstate' or k not in self.__dict__:
1296 if k == 'dirstate' or k not in self.__dict__:
1291 continue
1297 continue
1292 ce.refresh()
1298 ce.refresh()
1293
1299
1294 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1300 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1295 inheritchecker=None, parentenvvar=None):
1301 inheritchecker=None, parentenvvar=None):
1296 parentlock = None
1302 parentlock = None
1297 # the contents of parentenvvar are used by the underlying lock to
1303 # the contents of parentenvvar are used by the underlying lock to
1298 # determine whether it can be inherited
1304 # determine whether it can be inherited
1299 if parentenvvar is not None:
1305 if parentenvvar is not None:
1300 parentlock = os.environ.get(parentenvvar)
1306 parentlock = os.environ.get(parentenvvar)
1301 try:
1307 try:
1302 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1308 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1303 acquirefn=acquirefn, desc=desc,
1309 acquirefn=acquirefn, desc=desc,
1304 inheritchecker=inheritchecker,
1310 inheritchecker=inheritchecker,
1305 parentlock=parentlock)
1311 parentlock=parentlock)
1306 except error.LockHeld as inst:
1312 except error.LockHeld as inst:
1307 if not wait:
1313 if not wait:
1308 raise
1314 raise
1309 # show more details for new-style locks
1315 # show more details for new-style locks
1310 if ':' in inst.locker:
1316 if ':' in inst.locker:
1311 host, pid = inst.locker.split(":", 1)
1317 host, pid = inst.locker.split(":", 1)
1312 self.ui.warn(
1318 self.ui.warn(
1313 _("waiting for lock on %s held by process %r "
1319 _("waiting for lock on %s held by process %r "
1314 "on host %r\n") % (desc, pid, host))
1320 "on host %r\n") % (desc, pid, host))
1315 else:
1321 else:
1316 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1322 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1317 (desc, inst.locker))
1323 (desc, inst.locker))
1318 # default to 600 seconds timeout
1324 # default to 600 seconds timeout
1319 l = lockmod.lock(vfs, lockname,
1325 l = lockmod.lock(vfs, lockname,
1320 int(self.ui.config("ui", "timeout", "600")),
1326 int(self.ui.config("ui", "timeout", "600")),
1321 releasefn=releasefn, acquirefn=acquirefn,
1327 releasefn=releasefn, acquirefn=acquirefn,
1322 desc=desc)
1328 desc=desc)
1323 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1329 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1324 return l
1330 return l
1325
1331
1326 def _afterlock(self, callback):
1332 def _afterlock(self, callback):
1327 """add a callback to be run when the repository is fully unlocked
1333 """add a callback to be run when the repository is fully unlocked
1328
1334
1329 The callback will be executed when the outermost lock is released
1335 The callback will be executed when the outermost lock is released
1330 (with wlock being higher level than 'lock')."""
1336 (with wlock being higher level than 'lock')."""
1331 for ref in (self._wlockref, self._lockref):
1337 for ref in (self._wlockref, self._lockref):
1332 l = ref and ref()
1338 l = ref and ref()
1333 if l and l.held:
1339 if l and l.held:
1334 l.postrelease.append(callback)
1340 l.postrelease.append(callback)
1335 break
1341 break
1336 else: # no lock has been found.
1342 else: # no lock has been found.
1337 callback()
1343 callback()
1338
1344
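# A minimal, hypothetical sketch of how _afterlock() is meant to be used; the
# repository path and the callback body are illustrative assumptions.
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')

def reportunlocked():
    repo.ui.debug('all repository locks released\n')

# runs once the outermost lock is released, or immediately if no lock is held
# (the 'else' branch above)
repo._afterlock(reportunlocked)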
1339 def lock(self, wait=True):
1345 def lock(self, wait=True):
1340 '''Lock the repository store (.hg/store) and return a weak reference
1346 '''Lock the repository store (.hg/store) and return a weak reference
1341 to the lock. Use this before modifying the store (e.g. committing or
1347 to the lock. Use this before modifying the store (e.g. committing or
1342 stripping). If you are opening a transaction, get a lock as well.
1348 stripping). If you are opening a transaction, get a lock as well.
1343
1349
1344 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1350 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1345 'wlock' first to avoid a deadlock hazard.'''
1351 'wlock' first to avoid a deadlock hazard.'''
1346 l = self._currentlock(self._lockref)
1352 l = self._currentlock(self._lockref)
1347 if l is not None:
1353 if l is not None:
1348 l.lock()
1354 l.lock()
1349 return l
1355 return l
1350
1356
1351 l = self._lock(self.svfs, "lock", wait, None,
1357 l = self._lock(self.svfs, "lock", wait, None,
1352 self.invalidate, _('repository %s') % self.origroot)
1358 self.invalidate, _('repository %s') % self.origroot)
1353 self._lockref = weakref.ref(l)
1359 self._lockref = weakref.ref(l)
1354 return l
1360 return l
1355
1361
1356 def _wlockchecktransaction(self):
1362 def _wlockchecktransaction(self):
1357 if self.currenttransaction() is not None:
1363 if self.currenttransaction() is not None:
1358 raise error.LockInheritanceContractViolation(
1364 raise error.LockInheritanceContractViolation(
1359 'wlock cannot be inherited in the middle of a transaction')
1365 'wlock cannot be inherited in the middle of a transaction')
1360
1366
1361 def wlock(self, wait=True):
1367 def wlock(self, wait=True):
1362 '''Lock the non-store parts of the repository (everything under
1368 '''Lock the non-store parts of the repository (everything under
1363 .hg except .hg/store) and return a weak reference to the lock.
1369 .hg except .hg/store) and return a weak reference to the lock.
1364
1370
1365 Use this before modifying files in .hg.
1371 Use this before modifying files in .hg.
1366
1372
1367 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1373 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1368 'wlock' first to avoid a deadlock hazard.'''
1374 'wlock' first to avoid a deadlock hazard.'''
1369 l = self._wlockref and self._wlockref()
1375 l = self._wlockref and self._wlockref()
1370 if l is not None and l.held:
1376 if l is not None and l.held:
1371 l.lock()
1377 l.lock()
1372 return l
1378 return l
1373
1379
1374 # We do not need to check for non-waiting lock acquisitions. Such
1380 # We do not need to check for non-waiting lock acquisitions. Such
1375 # acquisitions would not cause a deadlock, as they would just fail.
1381 # acquisitions would not cause a deadlock, as they would just fail.
1376 if wait and (self.ui.configbool('devel', 'all-warnings')
1382 if wait and (self.ui.configbool('devel', 'all-warnings')
1377 or self.ui.configbool('devel', 'check-locks')):
1383 or self.ui.configbool('devel', 'check-locks')):
1378 if self._currentlock(self._lockref) is not None:
1384 if self._currentlock(self._lockref) is not None:
1379 self.ui.develwarn('"wlock" acquired after "lock"')
1385 self.ui.develwarn('"wlock" acquired after "lock"')
1380
1386
1381 def unlock():
1387 def unlock():
1382 if self.dirstate.pendingparentchange():
1388 if self.dirstate.pendingparentchange():
1383 self.dirstate.invalidate()
1389 self.dirstate.invalidate()
1384 else:
1390 else:
1385 self.dirstate.write(None)
1391 self.dirstate.write(None)
1386
1392
1387 self._filecache['dirstate'].refresh()
1393 self._filecache['dirstate'].refresh()
1388
1394
1389 l = self._lock(self.vfs, "wlock", wait, unlock,
1395 l = self._lock(self.vfs, "wlock", wait, unlock,
1390 self.invalidatedirstate, _('working directory of %s') %
1396 self.invalidatedirstate, _('working directory of %s') %
1391 self.origroot,
1397 self.origroot,
1392 inheritchecker=self._wlockchecktransaction,
1398 inheritchecker=self._wlockchecktransaction,
1393 parentenvvar='HG_WLOCK_LOCKER')
1399 parentenvvar='HG_WLOCK_LOCKER')
1394 self._wlockref = weakref.ref(l)
1400 self._wlockref = weakref.ref(l)
1395 return l
1401 return l
1396
1402
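# A hedged usage sketch of the lock ordering described in the docstrings
# above: always take wlock before lock. The repository path is an assumption.
from mercurial import hg, lock as lockmod, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')
wlock = lock = None
try:
    wlock = repo.wlock()   # non-store parts of .hg first
    lock = repo.lock()     # then the store (.hg/store)
    # ... modify the working copy and the store here ...
finally:
    lockmod.release(lock, wlock)   # release in reverse order, as commit() does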
1397 def _currentlock(self, lockref):
1403 def _currentlock(self, lockref):
1398 """Returns the lock if it's held, or None if it's not."""
1404 """Returns the lock if it's held, or None if it's not."""
1399 if lockref is None:
1405 if lockref is None:
1400 return None
1406 return None
1401 l = lockref()
1407 l = lockref()
1402 if l is None or not l.held:
1408 if l is None or not l.held:
1403 return None
1409 return None
1404 return l
1410 return l
1405
1411
1406 def currentwlock(self):
1412 def currentwlock(self):
1407 """Returns the wlock if it's held, or None if it's not."""
1413 """Returns the wlock if it's held, or None if it's not."""
1408 return self._currentlock(self._wlockref)
1414 return self._currentlock(self._wlockref)
1409
1415
1410 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1416 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1411 """
1417 """
1412 commit an individual file as part of a larger transaction
1418 commit an individual file as part of a larger transaction
1413 """
1419 """
1414
1420
1415 fname = fctx.path()
1421 fname = fctx.path()
1416 fparent1 = manifest1.get(fname, nullid)
1422 fparent1 = manifest1.get(fname, nullid)
1417 fparent2 = manifest2.get(fname, nullid)
1423 fparent2 = manifest2.get(fname, nullid)
1418 if isinstance(fctx, context.filectx):
1424 if isinstance(fctx, context.filectx):
1419 node = fctx.filenode()
1425 node = fctx.filenode()
1420 if node in [fparent1, fparent2]:
1426 if node in [fparent1, fparent2]:
1421 self.ui.debug('reusing %s filelog entry\n' % fname)
1427 self.ui.debug('reusing %s filelog entry\n' % fname)
1422 if manifest1.flags(fname) != fctx.flags():
1428 if manifest1.flags(fname) != fctx.flags():
1423 changelist.append(fname)
1429 changelist.append(fname)
1424 return node
1430 return node
1425
1431
1426 flog = self.file(fname)
1432 flog = self.file(fname)
1427 meta = {}
1433 meta = {}
1428 copy = fctx.renamed()
1434 copy = fctx.renamed()
1429 if copy and copy[0] != fname:
1435 if copy and copy[0] != fname:
1430 # Mark the new revision of this file as a copy of another
1436 # Mark the new revision of this file as a copy of another
1431 # file. This copy data will effectively act as a parent
1437 # file. This copy data will effectively act as a parent
1432 # of this new revision. If this is a merge, the first
1438 # of this new revision. If this is a merge, the first
1433 # parent will be the nullid (meaning "look up the copy data")
1439 # parent will be the nullid (meaning "look up the copy data")
1434 # and the second one will be the other parent. For example:
1440 # and the second one will be the other parent. For example:
1435 #
1441 #
1436 # 0 --- 1 --- 3 rev1 changes file foo
1442 # 0 --- 1 --- 3 rev1 changes file foo
1437 # \ / rev2 renames foo to bar and changes it
1443 # \ / rev2 renames foo to bar and changes it
1438 # \- 2 -/ rev3 should have bar with all changes and
1444 # \- 2 -/ rev3 should have bar with all changes and
1439 # should record that bar descends from
1445 # should record that bar descends from
1440 # bar in rev2 and foo in rev1
1446 # bar in rev2 and foo in rev1
1441 #
1447 #
1442 # this allows this merge to succeed:
1448 # this allows this merge to succeed:
1443 #
1449 #
1444 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1450 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1445 # \ / merging rev3 and rev4 should use bar@rev2
1451 # \ / merging rev3 and rev4 should use bar@rev2
1446 # \- 2 --- 4 as the merge base
1452 # \- 2 --- 4 as the merge base
1447 #
1453 #
1448
1454
1449 cfname = copy[0]
1455 cfname = copy[0]
1450 crev = manifest1.get(cfname)
1456 crev = manifest1.get(cfname)
1451 newfparent = fparent2
1457 newfparent = fparent2
1452
1458
1453 if manifest2: # branch merge
1459 if manifest2: # branch merge
1454 if fparent2 == nullid or crev is None: # copied on remote side
1460 if fparent2 == nullid or crev is None: # copied on remote side
1455 if cfname in manifest2:
1461 if cfname in manifest2:
1456 crev = manifest2[cfname]
1462 crev = manifest2[cfname]
1457 newfparent = fparent1
1463 newfparent = fparent1
1458
1464
1459 # Here, we used to search backwards through history to try to find
1465 # Here, we used to search backwards through history to try to find
1460 # where the file copy came from if the source of a copy was not in
1466 # where the file copy came from if the source of a copy was not in
1461 # the parent directory. However, this doesn't actually make sense to
1467 # the parent directory. However, this doesn't actually make sense to
1462 # do (what does a copy from something not in your working copy even
1468 # do (what does a copy from something not in your working copy even
1463 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1469 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1464 # the user that copy information was dropped, so if they didn't
1470 # the user that copy information was dropped, so if they didn't
1465 # expect this outcome it can be fixed, but this is the correct
1471 # expect this outcome it can be fixed, but this is the correct
1466 # behavior in this circumstance.
1472 # behavior in this circumstance.
1467
1473
1468 if crev:
1474 if crev:
1469 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1475 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1470 meta["copy"] = cfname
1476 meta["copy"] = cfname
1471 meta["copyrev"] = hex(crev)
1477 meta["copyrev"] = hex(crev)
1472 fparent1, fparent2 = nullid, newfparent
1478 fparent1, fparent2 = nullid, newfparent
1473 else:
1479 else:
1474 self.ui.warn(_("warning: can't find ancestor for '%s' "
1480 self.ui.warn(_("warning: can't find ancestor for '%s' "
1475 "copied from '%s'!\n") % (fname, cfname))
1481 "copied from '%s'!\n") % (fname, cfname))
1476
1482
1477 elif fparent1 == nullid:
1483 elif fparent1 == nullid:
1478 fparent1, fparent2 = fparent2, nullid
1484 fparent1, fparent2 = fparent2, nullid
1479 elif fparent2 != nullid:
1485 elif fparent2 != nullid:
1480 # is one parent an ancestor of the other?
1486 # is one parent an ancestor of the other?
1481 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1487 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1482 if fparent1 in fparentancestors:
1488 if fparent1 in fparentancestors:
1483 fparent1, fparent2 = fparent2, nullid
1489 fparent1, fparent2 = fparent2, nullid
1484 elif fparent2 in fparentancestors:
1490 elif fparent2 in fparentancestors:
1485 fparent2 = nullid
1491 fparent2 = nullid
1486
1492
1487 # is the file changed?
1493 # is the file changed?
1488 text = fctx.data()
1494 text = fctx.data()
1489 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1495 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1490 changelist.append(fname)
1496 changelist.append(fname)
1491 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1497 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1492 # are just the flags changed during merge?
1498 # are just the flags changed during merge?
1493 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1499 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1494 changelist.append(fname)
1500 changelist.append(fname)
1495
1501
1496 return fparent1
1502 return fparent1
1497
1503
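# To make the copy/rename handling above concrete: when a rename of 'foo' to
# 'bar' is committed, the filelog entry for 'bar' is written with metadata of
# roughly this shape (illustrative values, not taken from a real repository):
#
#   meta = {'copy': 'foo', 'copyrev': '1f0dee641bb7...'}  # source path + filenode
#   fparent1, fparent2 = nullid, newfparent               # nullid means "look up copy data"
#
# so that later merges can reach 'foo' as an ancestor of 'bar'.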
1498 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1504 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1499 """check for commit arguments that aren't committable"""
1505 """check for commit arguments that aren't committable"""
1500 if match.isexact() or match.prefix():
1506 if match.isexact() or match.prefix():
1501 matched = set(status.modified + status.added + status.removed)
1507 matched = set(status.modified + status.added + status.removed)
1502
1508
1503 for f in match.files():
1509 for f in match.files():
1504 f = self.dirstate.normalize(f)
1510 f = self.dirstate.normalize(f)
1505 if f == '.' or f in matched or f in wctx.substate:
1511 if f == '.' or f in matched or f in wctx.substate:
1506 continue
1512 continue
1507 if f in status.deleted:
1513 if f in status.deleted:
1508 fail(f, _('file not found!'))
1514 fail(f, _('file not found!'))
1509 if f in vdirs: # visited directory
1515 if f in vdirs: # visited directory
1510 d = f + '/'
1516 d = f + '/'
1511 for mf in matched:
1517 for mf in matched:
1512 if mf.startswith(d):
1518 if mf.startswith(d):
1513 break
1519 break
1514 else:
1520 else:
1515 fail(f, _("no match under directory!"))
1521 fail(f, _("no match under directory!"))
1516 elif f not in self.dirstate:
1522 elif f not in self.dirstate:
1517 fail(f, _("file not tracked!"))
1523 fail(f, _("file not tracked!"))
1518
1524
1519 @unfilteredmethod
1525 @unfilteredmethod
1520 def commit(self, text="", user=None, date=None, match=None, force=False,
1526 def commit(self, text="", user=None, date=None, match=None, force=False,
1521 editor=False, extra=None):
1527 editor=False, extra=None):
1522 """Add a new revision to current repository.
1528 """Add a new revision to current repository.
1523
1529
1524 Revision information is gathered from the working directory,
1530 Revision information is gathered from the working directory,
1525 match can be used to filter the committed files. If editor is
1531 match can be used to filter the committed files. If editor is
1526 supplied, it is called to get a commit message.
1532 supplied, it is called to get a commit message.
1527 """
1533 """
1528 if extra is None:
1534 if extra is None:
1529 extra = {}
1535 extra = {}
1530
1536
1531 def fail(f, msg):
1537 def fail(f, msg):
1532 raise error.Abort('%s: %s' % (f, msg))
1538 raise error.Abort('%s: %s' % (f, msg))
1533
1539
1534 if not match:
1540 if not match:
1535 match = matchmod.always(self.root, '')
1541 match = matchmod.always(self.root, '')
1536
1542
1537 if not force:
1543 if not force:
1538 vdirs = []
1544 vdirs = []
1539 match.explicitdir = vdirs.append
1545 match.explicitdir = vdirs.append
1540 match.bad = fail
1546 match.bad = fail
1541
1547
1542 wlock = lock = tr = None
1548 wlock = lock = tr = None
1543 try:
1549 try:
1544 wlock = self.wlock()
1550 wlock = self.wlock()
1545 lock = self.lock() # for recent changelog (see issue4368)
1551 lock = self.lock() # for recent changelog (see issue4368)
1546
1552
1547 wctx = self[None]
1553 wctx = self[None]
1548 merge = len(wctx.parents()) > 1
1554 merge = len(wctx.parents()) > 1
1549
1555
1550 if not force and merge and match.ispartial():
1556 if not force and merge and match.ispartial():
1551 raise error.Abort(_('cannot partially commit a merge '
1557 raise error.Abort(_('cannot partially commit a merge '
1552 '(do not specify files or patterns)'))
1558 '(do not specify files or patterns)'))
1553
1559
1554 status = self.status(match=match, clean=force)
1560 status = self.status(match=match, clean=force)
1555 if force:
1561 if force:
1556 status.modified.extend(status.clean) # mq may commit clean files
1562 status.modified.extend(status.clean) # mq may commit clean files
1557
1563
1558 # check subrepos
1564 # check subrepos
1559 subs = []
1565 subs = []
1560 commitsubs = set()
1566 commitsubs = set()
1561 newstate = wctx.substate.copy()
1567 newstate = wctx.substate.copy()
1562 # only manage subrepos and .hgsubstate if .hgsub is present
1568 # only manage subrepos and .hgsubstate if .hgsub is present
1563 if '.hgsub' in wctx:
1569 if '.hgsub' in wctx:
1564 # we'll decide whether to track this ourselves, thanks
1570 # we'll decide whether to track this ourselves, thanks
1565 for c in status.modified, status.added, status.removed:
1571 for c in status.modified, status.added, status.removed:
1566 if '.hgsubstate' in c:
1572 if '.hgsubstate' in c:
1567 c.remove('.hgsubstate')
1573 c.remove('.hgsubstate')
1568
1574
1569 # compare current state to last committed state
1575 # compare current state to last committed state
1570 # build new substate based on last committed state
1576 # build new substate based on last committed state
1571 oldstate = wctx.p1().substate
1577 oldstate = wctx.p1().substate
1572 for s in sorted(newstate.keys()):
1578 for s in sorted(newstate.keys()):
1573 if not match(s):
1579 if not match(s):
1574 # ignore working copy, use old state if present
1580 # ignore working copy, use old state if present
1575 if s in oldstate:
1581 if s in oldstate:
1576 newstate[s] = oldstate[s]
1582 newstate[s] = oldstate[s]
1577 continue
1583 continue
1578 if not force:
1584 if not force:
1579 raise error.Abort(
1585 raise error.Abort(
1580 _("commit with new subrepo %s excluded") % s)
1586 _("commit with new subrepo %s excluded") % s)
1581 dirtyreason = wctx.sub(s).dirtyreason(True)
1587 dirtyreason = wctx.sub(s).dirtyreason(True)
1582 if dirtyreason:
1588 if dirtyreason:
1583 if not self.ui.configbool('ui', 'commitsubrepos'):
1589 if not self.ui.configbool('ui', 'commitsubrepos'):
1584 raise error.Abort(dirtyreason,
1590 raise error.Abort(dirtyreason,
1585 hint=_("use --subrepos for recursive commit"))
1591 hint=_("use --subrepos for recursive commit"))
1586 subs.append(s)
1592 subs.append(s)
1587 commitsubs.add(s)
1593 commitsubs.add(s)
1588 else:
1594 else:
1589 bs = wctx.sub(s).basestate()
1595 bs = wctx.sub(s).basestate()
1590 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1596 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1591 if oldstate.get(s, (None, None, None))[1] != bs:
1597 if oldstate.get(s, (None, None, None))[1] != bs:
1592 subs.append(s)
1598 subs.append(s)
1593
1599
1594 # check for removed subrepos
1600 # check for removed subrepos
1595 for p in wctx.parents():
1601 for p in wctx.parents():
1596 r = [s for s in p.substate if s not in newstate]
1602 r = [s for s in p.substate if s not in newstate]
1597 subs += [s for s in r if match(s)]
1603 subs += [s for s in r if match(s)]
1598 if subs:
1604 if subs:
1599 if (not match('.hgsub') and
1605 if (not match('.hgsub') and
1600 '.hgsub' in (wctx.modified() + wctx.added())):
1606 '.hgsub' in (wctx.modified() + wctx.added())):
1601 raise error.Abort(
1607 raise error.Abort(
1602 _("can't commit subrepos without .hgsub"))
1608 _("can't commit subrepos without .hgsub"))
1603 status.modified.insert(0, '.hgsubstate')
1609 status.modified.insert(0, '.hgsubstate')
1604
1610
1605 elif '.hgsub' in status.removed:
1611 elif '.hgsub' in status.removed:
1606 # clean up .hgsubstate when .hgsub is removed
1612 # clean up .hgsubstate when .hgsub is removed
1607 if ('.hgsubstate' in wctx and
1613 if ('.hgsubstate' in wctx and
1608 '.hgsubstate' not in (status.modified + status.added +
1614 '.hgsubstate' not in (status.modified + status.added +
1609 status.removed)):
1615 status.removed)):
1610 status.removed.insert(0, '.hgsubstate')
1616 status.removed.insert(0, '.hgsubstate')
1611
1617
1612 # make sure all explicit patterns are matched
1618 # make sure all explicit patterns are matched
1613 if not force:
1619 if not force:
1614 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1620 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1615
1621
1616 cctx = context.workingcommitctx(self, status,
1622 cctx = context.workingcommitctx(self, status,
1617 text, user, date, extra)
1623 text, user, date, extra)
1618
1624
1619 # internal config: ui.allowemptycommit
1625 # internal config: ui.allowemptycommit
1620 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1626 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1621 or extra.get('close') or merge or cctx.files()
1627 or extra.get('close') or merge or cctx.files()
1622 or self.ui.configbool('ui', 'allowemptycommit'))
1628 or self.ui.configbool('ui', 'allowemptycommit'))
1623 if not allowemptycommit:
1629 if not allowemptycommit:
1624 return None
1630 return None
1625
1631
1626 if merge and cctx.deleted():
1632 if merge and cctx.deleted():
1627 raise error.Abort(_("cannot commit merge with missing files"))
1633 raise error.Abort(_("cannot commit merge with missing files"))
1628
1634
1629 ms = mergemod.mergestate.read(self)
1635 ms = mergemod.mergestate.read(self)
1630
1636
1631 if list(ms.unresolved()):
1637 if list(ms.unresolved()):
1632 raise error.Abort(_("unresolved merge conflicts "
1638 raise error.Abort(_("unresolved merge conflicts "
1633 "(see 'hg help resolve')"))
1639 "(see 'hg help resolve')"))
1634 if ms.mdstate() != 's' or list(ms.driverresolved()):
1640 if ms.mdstate() != 's' or list(ms.driverresolved()):
1635 raise error.Abort(_('driver-resolved merge conflicts'),
1641 raise error.Abort(_('driver-resolved merge conflicts'),
1636 hint=_('run "hg resolve --all" to resolve'))
1642 hint=_('run "hg resolve --all" to resolve'))
1637
1643
1638 if editor:
1644 if editor:
1639 cctx._text = editor(self, cctx, subs)
1645 cctx._text = editor(self, cctx, subs)
1640 edited = (text != cctx._text)
1646 edited = (text != cctx._text)
1641
1647
1642 # Save commit message in case this transaction gets rolled back
1648 # Save commit message in case this transaction gets rolled back
1643 # (e.g. by a pretxncommit hook). Leave the content alone on
1649 # (e.g. by a pretxncommit hook). Leave the content alone on
1644 # the assumption that the user will use the same editor again.
1650 # the assumption that the user will use the same editor again.
1645 msgfn = self.savecommitmessage(cctx._text)
1651 msgfn = self.savecommitmessage(cctx._text)
1646
1652
1647 # commit subs and write new state
1653 # commit subs and write new state
1648 if subs:
1654 if subs:
1649 for s in sorted(commitsubs):
1655 for s in sorted(commitsubs):
1650 sub = wctx.sub(s)
1656 sub = wctx.sub(s)
1651 self.ui.status(_('committing subrepository %s\n') %
1657 self.ui.status(_('committing subrepository %s\n') %
1652 subrepo.subrelpath(sub))
1658 subrepo.subrelpath(sub))
1653 sr = sub.commit(cctx._text, user, date)
1659 sr = sub.commit(cctx._text, user, date)
1654 newstate[s] = (newstate[s][0], sr)
1660 newstate[s] = (newstate[s][0], sr)
1655 subrepo.writestate(self, newstate)
1661 subrepo.writestate(self, newstate)
1656
1662
1657 p1, p2 = self.dirstate.parents()
1663 p1, p2 = self.dirstate.parents()
1658 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1664 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1659 try:
1665 try:
1660 self.hook("precommit", throw=True, parent1=hookp1,
1666 self.hook("precommit", throw=True, parent1=hookp1,
1661 parent2=hookp2)
1667 parent2=hookp2)
1662 tr = self.transaction('commit')
1668 tr = self.transaction('commit')
1663 ret = self.commitctx(cctx, True)
1669 ret = self.commitctx(cctx, True)
1664 except: # re-raises
1670 except: # re-raises
1665 if edited:
1671 if edited:
1666 self.ui.write(
1672 self.ui.write(
1667 _('note: commit message saved in %s\n') % msgfn)
1673 _('note: commit message saved in %s\n') % msgfn)
1668 raise
1674 raise
1669 # update bookmarks, dirstate and mergestate
1675 # update bookmarks, dirstate and mergestate
1670 bookmarks.update(self, [p1, p2], ret)
1676 bookmarks.update(self, [p1, p2], ret)
1671 cctx.markcommitted(ret)
1677 cctx.markcommitted(ret)
1672 ms.reset()
1678 ms.reset()
1673 tr.close()
1679 tr.close()
1674
1680
1675 finally:
1681 finally:
1676 lockmod.release(tr, lock, wlock)
1682 lockmod.release(tr, lock, wlock)
1677
1683
1678 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1684 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1679 # hack for commands that use a temporary commit (e.g. histedit):
1685 # hack for commands that use a temporary commit (e.g. histedit):
1680 # the temporary commit may have been stripped before the hook runs
1686 # the temporary commit may have been stripped before the hook runs
1681 if self.changelog.hasnode(ret):
1687 if self.changelog.hasnode(ret):
1682 self.hook("commit", node=node, parent1=parent1,
1688 self.hook("commit", node=node, parent1=parent1,
1683 parent2=parent2)
1689 parent2=parent2)
1684 self._afterlock(commithook)
1690 self._afterlock(commithook)
1685 return ret
1691 return ret
1686
1692
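# A minimal usage sketch for commit(); the repository path, file pattern,
# message and user are illustrative assumptions.
from mercurial import hg, match as matchmod, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')
m = matchmod.match(repo.root, '', ['path:src'])   # commit only files under src/
node = repo.commit(text='update src', user='alice <alice@example.com>', match=m)
if node is None:
    repo.ui.status('nothing changed\n')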
1687 @unfilteredmethod
1693 @unfilteredmethod
1688 def commitctx(self, ctx, error=False):
1694 def commitctx(self, ctx, error=False):
1689 """Add a new revision to current repository.
1695 """Add a new revision to current repository.
1690 Revision information is passed via the context argument.
1696 Revision information is passed via the context argument.
1691 """
1697 """
1692
1698
1693 tr = None
1699 tr = None
1694 p1, p2 = ctx.p1(), ctx.p2()
1700 p1, p2 = ctx.p1(), ctx.p2()
1695 user = ctx.user()
1701 user = ctx.user()
1696
1702
1697 lock = self.lock()
1703 lock = self.lock()
1698 try:
1704 try:
1699 tr = self.transaction("commit")
1705 tr = self.transaction("commit")
1700 trp = weakref.proxy(tr)
1706 trp = weakref.proxy(tr)
1701
1707
1702 if ctx.files():
1708 if ctx.files():
1703 m1 = p1.manifest()
1709 m1 = p1.manifest()
1704 m2 = p2.manifest()
1710 m2 = p2.manifest()
1705 m = m1.copy()
1711 m = m1.copy()
1706
1712
1707 # check in files
1713 # check in files
1708 added = []
1714 added = []
1709 changed = []
1715 changed = []
1710 removed = list(ctx.removed())
1716 removed = list(ctx.removed())
1711 linkrev = len(self)
1717 linkrev = len(self)
1712 self.ui.note(_("committing files:\n"))
1718 self.ui.note(_("committing files:\n"))
1713 for f in sorted(ctx.modified() + ctx.added()):
1719 for f in sorted(ctx.modified() + ctx.added()):
1714 self.ui.note(f + "\n")
1720 self.ui.note(f + "\n")
1715 try:
1721 try:
1716 fctx = ctx[f]
1722 fctx = ctx[f]
1717 if fctx is None:
1723 if fctx is None:
1718 removed.append(f)
1724 removed.append(f)
1719 else:
1725 else:
1720 added.append(f)
1726 added.append(f)
1721 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1727 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1722 trp, changed)
1728 trp, changed)
1723 m.setflag(f, fctx.flags())
1729 m.setflag(f, fctx.flags())
1724 except OSError as inst:
1730 except OSError as inst:
1725 self.ui.warn(_("trouble committing %s!\n") % f)
1731 self.ui.warn(_("trouble committing %s!\n") % f)
1726 raise
1732 raise
1727 except IOError as inst:
1733 except IOError as inst:
1728 errcode = getattr(inst, 'errno', errno.ENOENT)
1734 errcode = getattr(inst, 'errno', errno.ENOENT)
1729 if error or errcode and errcode != errno.ENOENT:
1735 if error or errcode and errcode != errno.ENOENT:
1730 self.ui.warn(_("trouble committing %s!\n") % f)
1736 self.ui.warn(_("trouble committing %s!\n") % f)
1731 raise
1737 raise
1732
1738
1733 # update manifest
1739 # update manifest
1734 self.ui.note(_("committing manifest\n"))
1740 self.ui.note(_("committing manifest\n"))
1735 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1741 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1736 drop = [f for f in removed if f in m]
1742 drop = [f for f in removed if f in m]
1737 for f in drop:
1743 for f in drop:
1738 del m[f]
1744 del m[f]
1739 mn = self.manifestlog.add(m, trp, linkrev,
1745 mn = self.manifestlog.add(m, trp, linkrev,
1740 p1.manifestnode(), p2.manifestnode(),
1746 p1.manifestnode(), p2.manifestnode(),
1741 added, drop)
1747 added, drop)
1742 files = changed + removed
1748 files = changed + removed
1743 else:
1749 else:
1744 mn = p1.manifestnode()
1750 mn = p1.manifestnode()
1745 files = []
1751 files = []
1746
1752
1747 # update changelog
1753 # update changelog
1748 self.ui.note(_("committing changelog\n"))
1754 self.ui.note(_("committing changelog\n"))
1749 self.changelog.delayupdate(tr)
1755 self.changelog.delayupdate(tr)
1750 n = self.changelog.add(mn, files, ctx.description(),
1756 n = self.changelog.add(mn, files, ctx.description(),
1751 trp, p1.node(), p2.node(),
1757 trp, p1.node(), p2.node(),
1752 user, ctx.date(), ctx.extra().copy())
1758 user, ctx.date(), ctx.extra().copy())
1753 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1759 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1754 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1760 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1755 parent2=xp2)
1761 parent2=xp2)
1756 # set the new commit in its proper phase
1762 # set the new commit in its proper phase
1757 targetphase = subrepo.newcommitphase(self.ui, ctx)
1763 targetphase = subrepo.newcommitphase(self.ui, ctx)
1758 if targetphase:
1764 if targetphase:
1759 # retract boundary do not alter parent changeset.
1765 # retract boundary do not alter parent changeset.
1760 # if a parent have higher the resulting phase will
1766 # if a parent have higher the resulting phase will
1761 # be compliant anyway
1767 # be compliant anyway
1762 #
1768 #
1763 # if minimal phase was 0 we don't need to retract anything
1769 # if minimal phase was 0 we don't need to retract anything
1764 phases.retractboundary(self, tr, targetphase, [n])
1770 phases.retractboundary(self, tr, targetphase, [n])
1765 tr.close()
1771 tr.close()
1766 branchmap.updatecache(self.filtered('served'))
1772 branchmap.updatecache(self.filtered('served'))
1767 return n
1773 return n
1768 finally:
1774 finally:
1769 if tr:
1775 if tr:
1770 tr.release()
1776 tr.release()
1771 lock.release()
1777 lock.release()
1772
1778
1773 @unfilteredmethod
1779 @unfilteredmethod
1774 def destroying(self):
1780 def destroying(self):
1775 '''Inform the repository that nodes are about to be destroyed.
1781 '''Inform the repository that nodes are about to be destroyed.
1776 Intended for use by strip and rollback, so there's a common
1782 Intended for use by strip and rollback, so there's a common
1777 place for anything that has to be done before destroying history.
1783 place for anything that has to be done before destroying history.
1778
1784
1779 This is mostly useful for saving state that is in memory and waiting
1785 This is mostly useful for saving state that is in memory and waiting
1780 to be flushed when the current lock is released. Because a call to
1786 to be flushed when the current lock is released. Because a call to
1781 destroyed is imminent, the repo will be invalidated, causing those
1787 destroyed is imminent, the repo will be invalidated, causing those
1782 changes to stay in memory (waiting for the next unlock), or vanish
1788 changes to stay in memory (waiting for the next unlock), or vanish
1783 completely.
1789 completely.
1784 '''
1790 '''
1785 # When using the same lock to commit and strip, the phasecache is left
1791 # When using the same lock to commit and strip, the phasecache is left
1786 # dirty after committing. Then when we strip, the repo is invalidated,
1792 # dirty after committing. Then when we strip, the repo is invalidated,
1787 # causing those changes to disappear.
1793 # causing those changes to disappear.
1788 if '_phasecache' in vars(self):
1794 if '_phasecache' in vars(self):
1789 self._phasecache.write()
1795 self._phasecache.write()
1790
1796
1791 @unfilteredmethod
1797 @unfilteredmethod
1792 def destroyed(self):
1798 def destroyed(self):
1793 '''Inform the repository that nodes have been destroyed.
1799 '''Inform the repository that nodes have been destroyed.
1794 Intended for use by strip and rollback, so there's a common
1800 Intended for use by strip and rollback, so there's a common
1795 place for anything that has to be done after destroying history.
1801 place for anything that has to be done after destroying history.
1796 '''
1802 '''
1797 # When one tries to:
1803 # When one tries to:
1798 # 1) destroy nodes thus calling this method (e.g. strip)
1804 # 1) destroy nodes thus calling this method (e.g. strip)
1799 # 2) use phasecache somewhere (e.g. commit)
1805 # 2) use phasecache somewhere (e.g. commit)
1800 #
1806 #
1801 # then 2) will fail because the phasecache contains nodes that were
1807 # then 2) will fail because the phasecache contains nodes that were
1802 # removed. We can either remove phasecache from the filecache,
1808 # removed. We can either remove phasecache from the filecache,
1803 # causing it to reload next time it is accessed, or simply filter
1809 # causing it to reload next time it is accessed, or simply filter
1804 # the removed nodes now and write the updated cache.
1810 # the removed nodes now and write the updated cache.
1805 self._phasecache.filterunknown(self)
1811 self._phasecache.filterunknown(self)
1806 self._phasecache.write()
1812 self._phasecache.write()
1807
1813
1808 # update the 'served' branch cache to help read-only server processes.
1814 # update the 'served' branch cache to help read-only server processes.
1809 # Thanks to branchcache collaboration, this is done from the nearest
1815 # Thanks to branchcache collaboration, this is done from the nearest
1810 # filtered subset and it is expected to be fast.
1816 # filtered subset and it is expected to be fast.
1811 branchmap.updatecache(self.filtered('served'))
1817 branchmap.updatecache(self.filtered('served'))
1812
1818
1813 # Ensure the persistent tag cache is updated. Doing it now
1819 # Ensure the persistent tag cache is updated. Doing it now
1814 # means that the tag cache only has to worry about destroyed
1820 # means that the tag cache only has to worry about destroyed
1815 # heads immediately after a strip/rollback. That in turn
1821 # heads immediately after a strip/rollback. That in turn
1816 # guarantees that "cachetip == currenttip" (comparing both rev
1822 # guarantees that "cachetip == currenttip" (comparing both rev
1817 # and node) always means no nodes have been added or destroyed.
1823 # and node) always means no nodes have been added or destroyed.
1818
1824
1819 # XXX this is suboptimal when qrefresh'ing: we strip the current
1825 # XXX this is suboptimal when qrefresh'ing: we strip the current
1820 # head, refresh the tag cache, then immediately add a new head.
1826 # head, refresh the tag cache, then immediately add a new head.
1821 # But I think doing it this way is necessary for the "instant
1827 # But I think doing it this way is necessary for the "instant
1822 # tag cache retrieval" case to work.
1828 # tag cache retrieval" case to work.
1823 self.invalidate()
1829 self.invalidate()
1824
1830
1825 def walk(self, match, node=None):
1831 def walk(self, match, node=None):
1826 '''
1832 '''
1827 walk recursively through the directory tree or a given
1833 walk recursively through the directory tree or a given
1828 changeset, finding all files matched by the match
1834 changeset, finding all files matched by the match
1829 function
1835 function
1830 '''
1836 '''
1831 return self[node].walk(match)
1837 return self[node].walk(match)
1832
1838
1833 def status(self, node1='.', node2=None, match=None,
1839 def status(self, node1='.', node2=None, match=None,
1834 ignored=False, clean=False, unknown=False,
1840 ignored=False, clean=False, unknown=False,
1835 listsubrepos=False):
1841 listsubrepos=False):
1836 '''a convenience method that calls node1.status(node2)'''
1842 '''a convenience method that calls node1.status(node2)'''
1837 return self[node1].status(node2, match, ignored, clean, unknown,
1843 return self[node1].status(node2, match, ignored, clean, unknown,
1838 listsubrepos)
1844 listsubrepos)
1839
1845
1840 def heads(self, start=None):
1846 def heads(self, start=None):
1841 heads = self.changelog.heads(start)
1847 heads = self.changelog.heads(start)
1842 # sort the output in rev descending order
1848 # sort the output in rev descending order
1843 return sorted(heads, key=self.changelog.rev, reverse=True)
1849 return sorted(heads, key=self.changelog.rev, reverse=True)
1844
1850
1845 def branchheads(self, branch=None, start=None, closed=False):
1851 def branchheads(self, branch=None, start=None, closed=False):
1846 '''return a (possibly filtered) list of heads for the given branch
1852 '''return a (possibly filtered) list of heads for the given branch
1847
1853
1848 Heads are returned in topological order, from newest to oldest.
1854 Heads are returned in topological order, from newest to oldest.
1849 If branch is None, use the dirstate branch.
1855 If branch is None, use the dirstate branch.
1850 If start is not None, return only heads reachable from start.
1856 If start is not None, return only heads reachable from start.
1851 If closed is True, return heads that are marked as closed as well.
1857 If closed is True, return heads that are marked as closed as well.
1852 '''
1858 '''
1853 if branch is None:
1859 if branch is None:
1854 branch = self[None].branch()
1860 branch = self[None].branch()
1855 branches = self.branchmap()
1861 branches = self.branchmap()
1856 if branch not in branches:
1862 if branch not in branches:
1857 return []
1863 return []
1858 # the cache returns heads ordered lowest to highest
1864 # the cache returns heads ordered lowest to highest
1859 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1865 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1860 if start is not None:
1866 if start is not None:
1861 # filter out the heads that cannot be reached from startrev
1867 # filter out the heads that cannot be reached from startrev
1862 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1868 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1863 bheads = [h for h in bheads if h in fbheads]
1869 bheads = [h for h in bheads if h in fbheads]
1864 return bheads
1870 return bheads
1865
1871
1866 def branches(self, nodes):
1872 def branches(self, nodes):
1867 if not nodes:
1873 if not nodes:
1868 nodes = [self.changelog.tip()]
1874 nodes = [self.changelog.tip()]
1869 b = []
1875 b = []
1870 for n in nodes:
1876 for n in nodes:
1871 t = n
1877 t = n
1872 while True:
1878 while True:
1873 p = self.changelog.parents(n)
1879 p = self.changelog.parents(n)
1874 if p[1] != nullid or p[0] == nullid:
1880 if p[1] != nullid or p[0] == nullid:
1875 b.append((t, n, p[0], p[1]))
1881 b.append((t, n, p[0], p[1]))
1876 break
1882 break
1877 n = p[0]
1883 n = p[0]
1878 return b
1884 return b
1879
1885
1880 def between(self, pairs):
1886 def between(self, pairs):
1881 r = []
1887 r = []
1882
1888
1883 for top, bottom in pairs:
1889 for top, bottom in pairs:
1884 n, l, i = top, [], 0
1890 n, l, i = top, [], 0
1885 f = 1
1891 f = 1
1886
1892
1887 while n != bottom and n != nullid:
1893 while n != bottom and n != nullid:
1888 p = self.changelog.parents(n)[0]
1894 p = self.changelog.parents(n)[0]
1889 if i == f:
1895 if i == f:
1890 l.append(n)
1896 l.append(n)
1891 f = f * 2
1897 f = f * 2
1892 n = p
1898 n = p
1893 i += 1
1899 i += 1
1894
1900
1895 r.append(l)
1901 r.append(l)
1896
1902
1897 return r
1903 return r
1898
1904
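# The loop above samples first-parent ancestors at exponentially growing
# distances from 'top'. A small self-contained sketch over plain integers
# (distances instead of nodes) shows which positions get picked:
def sampledistances(depth):
    picked, i, f, n = [], 0, 1, 0
    while n < depth:
        if i == f:
            picked.append(n)
            f *= 2
        n += 1
        i += 1
    return picked

# sampledistances(20) == [1, 2, 4, 8, 16]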
1899 def checkpush(self, pushop):
1905 def checkpush(self, pushop):
1900 """Extensions can override this function if additional checks have
1906 """Extensions can override this function if additional checks have
1901 to be performed before pushing, or call it if they override push
1907 to be performed before pushing, or call it if they override push
1902 command.
1908 command.
1903 """
1909 """
1904 pass
1910 pass
1905
1911
1906 @unfilteredpropertycache
1912 @unfilteredpropertycache
1907 def prepushoutgoinghooks(self):
1913 def prepushoutgoinghooks(self):
1908 """Return a util.hooks object whose hooks are called with a pushop
1914 """Return a util.hooks object whose hooks are called with a pushop
1909 (carrying repo, remote and outgoing) before pushing changesets.
1915 (carrying repo, remote and outgoing) before pushing changesets.
1910 """
1916 """
1911 return util.hooks()
1917 return util.hooks()
1912
1918
1913 def pushkey(self, namespace, key, old, new):
1919 def pushkey(self, namespace, key, old, new):
1914 try:
1920 try:
1915 tr = self.currenttransaction()
1921 tr = self.currenttransaction()
1916 hookargs = {}
1922 hookargs = {}
1917 if tr is not None:
1923 if tr is not None:
1918 hookargs.update(tr.hookargs)
1924 hookargs.update(tr.hookargs)
1919 hookargs['namespace'] = namespace
1925 hookargs['namespace'] = namespace
1920 hookargs['key'] = key
1926 hookargs['key'] = key
1921 hookargs['old'] = old
1927 hookargs['old'] = old
1922 hookargs['new'] = new
1928 hookargs['new'] = new
1923 self.hook('prepushkey', throw=True, **hookargs)
1929 self.hook('prepushkey', throw=True, **hookargs)
1924 except error.HookAbort as exc:
1930 except error.HookAbort as exc:
1925 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1931 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1926 if exc.hint:
1932 if exc.hint:
1927 self.ui.write_err(_("(%s)\n") % exc.hint)
1933 self.ui.write_err(_("(%s)\n") % exc.hint)
1928 return False
1934 return False
1929 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1935 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1930 ret = pushkey.push(self, namespace, key, old, new)
1936 ret = pushkey.push(self, namespace, key, old, new)
1931 def runhook():
1937 def runhook():
1932 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1938 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1933 ret=ret)
1939 ret=ret)
1934 self._afterlock(runhook)
1940 self._afterlock(runhook)
1935 return ret
1941 return ret
1936
1942
1937 def listkeys(self, namespace):
1943 def listkeys(self, namespace):
1938 self.hook('prelistkeys', throw=True, namespace=namespace)
1944 self.hook('prelistkeys', throw=True, namespace=namespace)
1939 self.ui.debug('listing keys for "%s"\n' % namespace)
1945 self.ui.debug('listing keys for "%s"\n' % namespace)
1940 values = pushkey.list(self, namespace)
1946 values = pushkey.list(self, namespace)
1941 self.hook('listkeys', namespace=namespace, values=values)
1947 self.hook('listkeys', namespace=namespace, values=values)
1942 return values
1948 return values
1943
1949
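# A hedged sketch of the pushkey/listkeys API using the standard 'bookmarks'
# namespace; the repository path and bookmark name are assumptions.
from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui(), '/path/to/repo')
marks = repo.listkeys('bookmarks')        # {bookmark name: hex node}
old = marks.get('feature', '')
new = repo['tip'].hex()                   # 40-char hex of the target changeset
if not repo.pushkey('bookmarks', 'feature', old, new):
    repo.ui.warn('bookmark update was refused\n')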
1944 def debugwireargs(self, one, two, three=None, four=None, five=None):
1950 def debugwireargs(self, one, two, three=None, four=None, five=None):
1945 '''used to test argument passing over the wire'''
1951 '''used to test argument passing over the wire'''
1946 return "%s %s %s %s %s" % (one, two, three, four, five)
1952 return "%s %s %s %s %s" % (one, two, three, four, five)
1947
1953
1948 def savecommitmessage(self, text):
1954 def savecommitmessage(self, text):
1949 fp = self.vfs('last-message.txt', 'wb')
1955 fp = self.vfs('last-message.txt', 'wb')
1950 try:
1956 try:
1951 fp.write(text)
1957 fp.write(text)
1952 finally:
1958 finally:
1953 fp.close()
1959 fp.close()
1954 return self.pathto(fp.name[len(self.root) + 1:])
1960 return self.pathto(fp.name[len(self.root) + 1:])
1955
1961
1956 # used to avoid circular references so destructors work
1962 # used to avoid circular references so destructors work
1957 def aftertrans(files):
1963 def aftertrans(files):
1958 renamefiles = [tuple(t) for t in files]
1964 renamefiles = [tuple(t) for t in files]
1959 def a():
1965 def a():
1960 for vfs, src, dest in renamefiles:
1966 for vfs, src, dest in renamefiles:
1961 try:
1967 try:
1962 vfs.rename(src, dest)
1968 vfs.rename(src, dest)
1963 except OSError: # journal file does not yet exist
1969 except OSError: # journal file does not yet exist
1964 pass
1970 pass
1965 return a
1971 return a
1966
1972
1967 def undoname(fn):
1973 def undoname(fn):
1968 base, name = os.path.split(fn)
1974 base, name = os.path.split(fn)
1969 assert name.startswith('journal')
1975 assert name.startswith('journal')
1970 return os.path.join(base, name.replace('journal', 'undo', 1))
1976 return os.path.join(base, name.replace('journal', 'undo', 1))
1971
1977
1972 def instance(ui, path, create):
1978 def instance(ui, path, create):
1973 return localrepository(ui, util.urllocalpath(path), create)
1979 return localrepository(ui, util.urllocalpath(path), create)
1974
1980
1975 def islocal(path):
1981 def islocal(path):
1976 return True
1982 return True
1977
1983
1978 def newreporequirements(repo):
1984 def newreporequirements(repo):
1979 """Determine the set of requirements for a new local repository.
1985 """Determine the set of requirements for a new local repository.
1980
1986
1981 Extensions can wrap this function to specify custom requirements for
1987 Extensions can wrap this function to specify custom requirements for
1982 new repositories.
1988 new repositories.
1983 """
1989 """
1984 ui = repo.ui
1990 ui = repo.ui
1985 requirements = set(['revlogv1'])
1991 requirements = set(['revlogv1'])
1986 if ui.configbool('format', 'usestore', True):
1992 if ui.configbool('format', 'usestore', True):
1987 requirements.add('store')
1993 requirements.add('store')
1988 if ui.configbool('format', 'usefncache', True):
1994 if ui.configbool('format', 'usefncache', True):
1989 requirements.add('fncache')
1995 requirements.add('fncache')
1990 if ui.configbool('format', 'dotencode', True):
1996 if ui.configbool('format', 'dotencode', True):
1991 requirements.add('dotencode')
1997 requirements.add('dotencode')
1992
1998
1993 if scmutil.gdinitconfig(ui):
1999 if scmutil.gdinitconfig(ui):
1994 requirements.add('generaldelta')
2000 requirements.add('generaldelta')
1995 if ui.configbool('experimental', 'treemanifest', False):
2001 if ui.configbool('experimental', 'treemanifest', False):
1996 requirements.add('treemanifest')
2002 requirements.add('treemanifest')
1997 if ui.configbool('experimental', 'manifestv2', False):
2003 if ui.configbool('experimental', 'manifestv2', False):
1998 requirements.add('manifestv2')
2004 requirements.add('manifestv2')
1999
2005
2000 return requirements
2006 return requirements
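# newreporequirements() is meant to be wrapped by extensions; a minimal sketch
# of such a wrapper follows. The 'myext' config knob and the 'exp-myfeature'
# requirement name are made up for illustration.
from mercurial import extensions, localrepo

def wrapnewreporequirements(orig, repo):
    requirements = orig(repo)
    if repo.ui.configbool('myext', 'enable', False):
        requirements.add('exp-myfeature')
    return requirements

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            wrapnewreporequirements)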
@@ -1,262 +1,261 b''
1 # unionrepo.py - repository class for viewing union of repository changesets
1 # unionrepo.py - repository class for viewing union of repository changesets
2 #
2 #
3 # Derived from bundlerepo.py
3 # Derived from bundlerepo.py
4 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
5 # Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
5 # Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Repository class for "in-memory pull" of one local repository to another,
10 """Repository class for "in-memory pull" of one local repository to another,
11 allowing operations like diff and log with revsets.
11 allowing operations like diff and log with revsets.
12 """
12 """
13
13
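# A hedged usage sketch for this module: a union view of two local
# repositories can be opened through a 'union:repo1+repo2' path; the concrete
# paths below are assumptions.
from mercurial import hg, ui as uimod

u = uimod.ui()
union = hg.repository(u, 'union:/path/to/repo1+/path/to/repo2')
for rev in union:
    ctx = union[rev]
    u.write('%d:%s %s\n' % (rev, ctx.hex()[:12], ctx.description().split('\n')[0]))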
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import os
16 import os
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import nullid
19 from .node import nullid
20
20
21 from . import (
21 from . import (
22 changelog,
22 changelog,
23 cmdutil,
23 cmdutil,
24 error,
24 error,
25 filelog,
25 filelog,
26 localrepo,
26 localrepo,
27 manifest,
27 manifest,
28 mdiff,
28 mdiff,
29 pathutil,
29 pathutil,
30 revlog,
30 revlog,
31 scmutil,
31 scmutil,
32 util,
32 util,
33 )
33 )
34
34
class unionrevlog(revlog.revlog):
    def __init__(self, opener, indexfile, revlog2, linkmapper):
        # How it works:
        # To retrieve a revision, we just need to know the node id so we can
        # look it up in revlog2.
        #
        # To differentiate a rev in the second revlog from a rev in the revlog,
        # we check revision against repotiprev.
        opener = scmutil.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.revlog2 = revlog2

        n = len(self)
        self.repotiprev = n - 1
        self.bundlerevs = set() # used by 'bundle()' revset expression
        for rev2 in self.revlog2:
            rev = self.revlog2.index[rev2]
            # rev numbers - in revlog2, very different from self.rev
            _start, _csize, _rsize, base, linkrev, p1rev, p2rev, node = rev
            flags = _start & 0xFFFF

            if linkmapper is None: # link is to same revlog
                assert linkrev == rev2 # we never link back
                link = n
            else: # rev must be mapped from repo2 cl to unified cl by linkmapper
                link = linkmapper(linkrev)

            if linkmapper is not None: # link is to same revlog
                base = linkmapper(base)

            if node in self.nodemap:
                # this happens for the common revlog revisions
                self.bundlerevs.add(self.nodemap[node])
                continue

            p1node = self.revlog2.node(p1rev)
            p2node = self.revlog2.node(p2rev)

            e = (flags, None, None, base,
                 link, self.rev(p1node), self.rev(p2node), node)
            self.index.insert(-1, e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            n += 1

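    # Illustrative sketch (editorial addition, not part of the original
    # source): after __init__ runs, revs 0..repotiprev come from the revlog
    # opened via 'indexfile', while every higher rev is a node that only
    # exists in revlog2 and is resolved there, e.g.
    #
    #     ur = unionrevlog(opener, indexfile, revlog2, linkmapper)
    #     shared = [r for r in ur if r <= ur.repotiprev]
    #     extra = [r for r in ur if r > ur.repotiprev]  # served by revlog2
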
    def _chunk(self, rev):
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        return self.revlog2._chunk(self.node(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            return self.revlog2.revdiff(
                self.revlog2.rev(self.node(rev1)),
                self.revlog2.rev(self.node(rev2)))
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return self.baserevdiff(rev1, rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, nodeorrev):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = self.rev(node)

        if node == nullid:
            return ""

        if rev > self.repotiprev:
            text = self.revlog2.revision(node)
            self._cache = (node, rev, text)
        else:
            text = self.baserevision(rev)
            # already cached
        return text

    def baserevision(self, nodeorrev):
        # Revlog subclasses may override the 'revision' method to modify the
        # format of content retrieved from the revlog. To use unionrevlog with
        # such a class one needs to override 'baserevision' and make a more
        # specific call here.
        return revlog.revlog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        # Exists for the same purpose as baserevision.
        return revlog.revlog.revdiff(self, rev1, rev2)

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        raise NotImplementedError
    def addgroup(self, revs, linkmapper, transaction):
        raise NotImplementedError
    def strip(self, rev, minlink):
        raise NotImplementedError
    def checksize(self):
        raise NotImplementedError

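# The concrete classes below (unionchangelog, unionmanifest, unionfilelog)
# all apply the baserevision/baserevdiff pattern described above.  As a
# hedged sketch only (editorial addition; 'customlog' is not a real
# Mercurial class), a revlog subclass with its own 'revision' method would
# be combined the same way:
#
#     class unioncustomlog(unionrevlog, customlog):
#         def __init__(self, opener, opener2, linkmapper):
#             customlog.__init__(self, opener)
#             unionrevlog.__init__(self, opener, self.indexfile,
#                                  customlog(opener2), linkmapper)
#
#         def baserevision(self, nodeorrev):
#             return customlog.revision(self, nodeorrev)
#
#         def baserevdiff(self, rev1, rev2):
#             return customlog.revdiff(self, rev1, rev2)
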
class unionchangelog(unionrevlog, changelog.changelog):
    def __init__(self, opener, opener2):
        changelog.changelog.__init__(self, opener)
        linkmapper = None
        changelog2 = changelog.changelog(opener2)
        unionrevlog.__init__(self, opener, self.indexfile, changelog2,
                             linkmapper)

    def baserevision(self, nodeorrev):
        # Although changelog doesn't override the 'revision' method, some
        # extensions may replace this class with another that does. Same story
        # with the manifest and filelog classes.
        return changelog.changelog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return changelog.changelog.revdiff(self, rev1, rev2)

class unionmanifest(unionrevlog, manifest.manifest):
    def __init__(self, opener, opener2, linkmapper):
        manifest.manifest.__init__(self, opener)
        manifest2 = manifest.manifest(opener2)
        unionrevlog.__init__(self, opener, self.indexfile, manifest2,
                             linkmapper)

    def baserevision(self, nodeorrev):
        return manifest.manifest.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return manifest.manifest.revdiff(self, rev1, rev2)

class unionfilelog(unionrevlog, filelog.filelog):
    def __init__(self, opener, path, opener2, linkmapper, repo):
        filelog.filelog.__init__(self, opener, path)
        filelog2 = filelog.filelog(opener2, path)
        unionrevlog.__init__(self, opener, self.indexfile, filelog2,
                             linkmapper)
        self._repo = repo

    def baserevision(self, nodeorrev):
        return filelog.filelog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return filelog.filelog.revdiff(self, rev1, rev2)

    def iscensored(self, rev):
        """Check if a revision is censored."""
        if rev <= self.repotiprev:
            return filelog.filelog.iscensored(self, rev)
        node = self.node(rev)
        return self.revlog2.iscensored(self.revlog2.rev(node))

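# Hedged usage sketch (editorial addition; the file name is hypothetical and
# 'urepo' stands for a unionrepository instance): file logs are obtained
# through the repository rather than constructed directly, and they dispatch
# on repotiprev exactly like the other union revlogs:
#
#     flog = urepo.file('a.txt')
#     flog.revision(flog.node(0))               # served from the main repo
#     flog.revision(flog.node(len(flog) - 1))   # may be served from repo2
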
class unionpeer(localrepo.localpeer):
    def canpush(self):
        return False

class unionrepository(localrepo.localrepository):
    def __init__(self, ui, path, path2):
        localrepo.localrepository.__init__(self, ui, path)
        self.ui.setconfig('phases', 'publish', False, 'unionrepo')

        self._url = 'union:%s+%s' % (util.expandpath(path),
                                     util.expandpath(path2))
        self.repo2 = localrepo.localrepository(ui, path2)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        return unionchangelog(self.svfs, self.repo2.svfs)

    def _clrev(self, rev2):
        """map from repo2 changelog rev to temporary rev in self.changelog"""
        node = self.repo2.changelog.node(rev2)
        return self.changelog.rev(node)

    def _constructmanifest(self):
        return unionmanifest(self.svfs, self.repo2.svfs,
                             self.unfiltered()._clrev)
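    # Editorial note (hedged assumption, not in the original source): this
    # helper takes over from the former 'manifest' property on this class;
    # the expectation is that the base localrepository now builds its
    # manifest property by calling self._constructmanifest(), so the
    # union-aware manifest above is still the one actually used.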

    def url(self):
        return self._url

    def file(self, f):
        return unionfilelog(self.svfs, f, self.repo2.svfs,
                            self.unfiltered()._clrev, self)

    def close(self):
        self.repo2.close()

    def cancopy(self):
        return False

    def peer(self):
        return unionpeer(self)

    def getcwd(self):
        return os.getcwd() # always outside the repo

def instance(ui, path, create):
    if create:
        raise error.Abort(_('cannot create new union repository'))
    parentpath = ui.config("bundle", "mainreporoot", "")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(os.getcwd())
        if parentpath is None:
            parentpath = ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = os.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    if path.startswith('union:'):
        s = path.split(":", 1)[1].split("+", 1)
        if len(s) == 1:
            repopath, repopath2 = parentpath, s[0]
        else:
            repopath, repopath2 = s
    else:
        repopath, repopath2 = parentpath, path
    return unionrepository(ui, repopath, repopath2)
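
# Hedged usage sketch (editorial addition; repository names are
# hypothetical).  "union:" paths are handled by instance() above, and the
# scheme is assumed to be registered with mercurial.hg so that opening such
# a path yields a combined, read-only view of two local repositories, from
# the command line or from Python:
#
#     $ hg -R union:repo1+repo2 log -T '{rev} {desc|firstline}\n'
#
#     from mercurial import hg, ui as uimod
#     repo = hg.repository(uimod.ui(), 'union:repo1+repo2')
#     len(repo)               # changesets from both repositories
#     repo.peer().canpush()   # False - the union view is read-only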