wireproto: remove iterbatch() from peer interface (API)...
Gregory Szorc
r37651:33a6eee0 default
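
This change drops the legacy iterbatch() batching API from the local peer:
the localiterbatcher class, the iterbatch() method, and the now-unused
wireprotov1peer import are all deleted, leaving commandexecutor() as the
way to issue commands. As a minimal sketch of the replacement calling
convention, inferred from the localcommandexecutor interface in the diff
below ('peer', 'nodes', and the command arguments are illustrative
assumptions, not part of this commit):

    # Sketch only: drives the commandexecutor() API that supersedes
    # iterbatch(). 'peer' is assumed to be any peer object, e.g. the
    # result of repo.peer().
    nodes = [b'\x00' * 20]  # illustrative 20-byte node ids

    with peer.commandexecutor() as e:
        # callcommand() returns a future. The local executor resolves it
        # immediately; wire peers may defer work until sendcommands().
        fheads = e.callcommand('heads', {})
        fknown = e.callcommand('known', {'nodes': nodes})

        # Flush any batched commands (a no-op for the local executor).
        e.sendcommands()

        heads = fheads.result()
        known = fknown.result()

Exceptions raised by the underlying command are captured on the future
(via set_exception_info) and re-raised from result(), mirroring how
errors propagate from remote peers.
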
@@ -1,2392 +1,2374 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from .thirdparty.zope import (
24 from .thirdparty.zope import (
25 interface as zi,
25 interface as zi,
26 )
26 )
27 from . import (
27 from . import (
28 bookmarks,
28 bookmarks,
29 branchmap,
29 branchmap,
30 bundle2,
30 bundle2,
31 changegroup,
31 changegroup,
32 changelog,
32 changelog,
33 color,
33 color,
34 context,
34 context,
35 dirstate,
35 dirstate,
36 dirstateguard,
36 dirstateguard,
37 discovery,
37 discovery,
38 encoding,
38 encoding,
39 error,
39 error,
40 exchange,
40 exchange,
41 extensions,
41 extensions,
42 filelog,
42 filelog,
43 hook,
43 hook,
44 lock as lockmod,
44 lock as lockmod,
45 manifest,
45 manifest,
46 match as matchmod,
46 match as matchmod,
47 merge as mergemod,
47 merge as mergemod,
48 mergeutil,
48 mergeutil,
49 namespaces,
49 namespaces,
50 narrowspec,
50 narrowspec,
51 obsolete,
51 obsolete,
52 pathutil,
52 pathutil,
53 phases,
53 phases,
54 pushkey,
54 pushkey,
55 pycompat,
55 pycompat,
56 repository,
56 repository,
57 repoview,
57 repoview,
58 revset,
58 revset,
59 revsetlang,
59 revsetlang,
60 scmutil,
60 scmutil,
61 sparse,
61 sparse,
62 store,
62 store,
63 subrepoutil,
63 subrepoutil,
64 tags as tagsmod,
64 tags as tagsmod,
65 transaction,
65 transaction,
66 txnutil,
66 txnutil,
67 util,
67 util,
68 vfs as vfsmod,
68 vfs as vfsmod,
69 wireprotov1peer,
70 )
69 )
71 from .utils import (
70 from .utils import (
72 procutil,
71 procutil,
73 stringutil,
72 stringutil,
74 )
73 )
75
74
76 release = lockmod.release
75 release = lockmod.release
77 urlerr = util.urlerr
76 urlerr = util.urlerr
78 urlreq = util.urlreq
77 urlreq = util.urlreq
79
78
80 # set of (path, vfs-location) tuples. vfs-location is:
79 # set of (path, vfs-location) tuples. vfs-location is:
81 # - 'plain for vfs relative paths
80 # - 'plain for vfs relative paths
82 # - '' for svfs relative paths
81 # - '' for svfs relative paths
83 _cachedfiles = set()
82 _cachedfiles = set()
84
83
85 class _basefilecache(scmutil.filecache):
84 class _basefilecache(scmutil.filecache):
86 """All filecache usage on repo are done for logic that should be unfiltered
85 """All filecache usage on repo are done for logic that should be unfiltered
87 """
86 """
88 def __get__(self, repo, type=None):
87 def __get__(self, repo, type=None):
89 if repo is None:
88 if repo is None:
90 return self
89 return self
91 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
92 def __set__(self, repo, value):
91 def __set__(self, repo, value):
93 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
94 def __delete__(self, repo):
93 def __delete__(self, repo):
95 return super(_basefilecache, self).__delete__(repo.unfiltered())
94 return super(_basefilecache, self).__delete__(repo.unfiltered())
96
95
97 class repofilecache(_basefilecache):
96 class repofilecache(_basefilecache):
98 """filecache for files in .hg but outside of .hg/store"""
97 """filecache for files in .hg but outside of .hg/store"""
99 def __init__(self, *paths):
98 def __init__(self, *paths):
100 super(repofilecache, self).__init__(*paths)
99 super(repofilecache, self).__init__(*paths)
101 for path in paths:
100 for path in paths:
102 _cachedfiles.add((path, 'plain'))
101 _cachedfiles.add((path, 'plain'))
103
102
104 def join(self, obj, fname):
103 def join(self, obj, fname):
105 return obj.vfs.join(fname)
104 return obj.vfs.join(fname)
106
105
107 class storecache(_basefilecache):
106 class storecache(_basefilecache):
108 """filecache for files in the store"""
107 """filecache for files in the store"""
109 def __init__(self, *paths):
108 def __init__(self, *paths):
110 super(storecache, self).__init__(*paths)
109 super(storecache, self).__init__(*paths)
111 for path in paths:
110 for path in paths:
112 _cachedfiles.add((path, ''))
111 _cachedfiles.add((path, ''))
113
112
114 def join(self, obj, fname):
113 def join(self, obj, fname):
115 return obj.sjoin(fname)
114 return obj.sjoin(fname)
116
115
117 def isfilecached(repo, name):
116 def isfilecached(repo, name):
118 """check if a repo has already cached "name" filecache-ed property
117 """check if a repo has already cached "name" filecache-ed property
119
118
120 This returns (cachedobj-or-None, iscached) tuple.
119 This returns (cachedobj-or-None, iscached) tuple.
121 """
120 """
122 cacheentry = repo.unfiltered()._filecache.get(name, None)
121 cacheentry = repo.unfiltered()._filecache.get(name, None)
123 if not cacheentry:
122 if not cacheentry:
124 return None, False
123 return None, False
125 return cacheentry.obj, True
124 return cacheentry.obj, True
126
125
127 class unfilteredpropertycache(util.propertycache):
126 class unfilteredpropertycache(util.propertycache):
128 """propertycache that apply to unfiltered repo only"""
127 """propertycache that apply to unfiltered repo only"""
129
128
130 def __get__(self, repo, type=None):
129 def __get__(self, repo, type=None):
131 unfi = repo.unfiltered()
130 unfi = repo.unfiltered()
132 if unfi is repo:
131 if unfi is repo:
133 return super(unfilteredpropertycache, self).__get__(unfi)
132 return super(unfilteredpropertycache, self).__get__(unfi)
134 return getattr(unfi, self.name)
133 return getattr(unfi, self.name)
135
134
136 class filteredpropertycache(util.propertycache):
135 class filteredpropertycache(util.propertycache):
137 """propertycache that must take filtering in account"""
136 """propertycache that must take filtering in account"""
138
137
139 def cachevalue(self, obj, value):
138 def cachevalue(self, obj, value):
140 object.__setattr__(obj, self.name, value)
139 object.__setattr__(obj, self.name, value)
141
140
142
141
143 def hasunfilteredcache(repo, name):
142 def hasunfilteredcache(repo, name):
144 """check if a repo has an unfilteredpropertycache value for <name>"""
143 """check if a repo has an unfilteredpropertycache value for <name>"""
145 return name in vars(repo.unfiltered())
144 return name in vars(repo.unfiltered())
146
145
147 def unfilteredmethod(orig):
146 def unfilteredmethod(orig):
148 """decorate method that always need to be run on unfiltered version"""
147 """decorate method that always need to be run on unfiltered version"""
149 def wrapper(repo, *args, **kwargs):
148 def wrapper(repo, *args, **kwargs):
150 return orig(repo.unfiltered(), *args, **kwargs)
149 return orig(repo.unfiltered(), *args, **kwargs)
151 return wrapper
150 return wrapper
152
151
153 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
152 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
154 'unbundle'}
153 'unbundle'}
155 legacycaps = moderncaps.union({'changegroupsubset'})
154 legacycaps = moderncaps.union({'changegroupsubset'})
156
155
157 class localiterbatcher(wireprotov1peer.iterbatcher):
158 def __init__(self, local):
159 super(localiterbatcher, self).__init__()
160 self.local = local
161
162 def submit(self):
163 # submit for a local iter batcher is a noop
164 pass
165
166 def results(self):
167 for name, args, opts, resref in self.calls:
168 resref.set(getattr(self.local, name)(*args, **opts))
169 yield resref.value
170
171 @zi.implementer(repository.ipeercommandexecutor)
156 @zi.implementer(repository.ipeercommandexecutor)
172 class localcommandexecutor(object):
157 class localcommandexecutor(object):
173 def __init__(self, peer):
158 def __init__(self, peer):
174 self._peer = peer
159 self._peer = peer
175 self._sent = False
160 self._sent = False
176 self._closed = False
161 self._closed = False
177
162
178 def __enter__(self):
163 def __enter__(self):
179 return self
164 return self
180
165
181 def __exit__(self, exctype, excvalue, exctb):
166 def __exit__(self, exctype, excvalue, exctb):
182 self.close()
167 self.close()
183
168
184 def callcommand(self, command, args):
169 def callcommand(self, command, args):
185 if self._sent:
170 if self._sent:
186 raise error.ProgrammingError('callcommand() cannot be used after '
171 raise error.ProgrammingError('callcommand() cannot be used after '
187 'sendcommands()')
172 'sendcommands()')
188
173
189 if self._closed:
174 if self._closed:
190 raise error.ProgrammingError('callcommand() cannot be used after '
175 raise error.ProgrammingError('callcommand() cannot be used after '
191 'close()')
176 'close()')
192
177
193 # We don't need to support anything fancy. Just call the named
178 # We don't need to support anything fancy. Just call the named
194 # method on the peer and return a resolved future.
179 # method on the peer and return a resolved future.
195 fn = getattr(self._peer, pycompat.sysstr(command))
180 fn = getattr(self._peer, pycompat.sysstr(command))
196
181
197 f = pycompat.futures.Future()
182 f = pycompat.futures.Future()
198
183
199 try:
184 try:
200 result = fn(**args)
185 result = fn(**args)
201 except Exception:
186 except Exception:
202 f.set_exception_info(*sys.exc_info()[1:])
187 f.set_exception_info(*sys.exc_info()[1:])
203 else:
188 else:
204 f.set_result(result)
189 f.set_result(result)
205
190
206 return f
191 return f
207
192
208 def sendcommands(self):
193 def sendcommands(self):
209 self._sent = True
194 self._sent = True
210
195
211 def close(self):
196 def close(self):
212 self._closed = True
197 self._closed = True
213
198
214 class localpeer(repository.peer):
199 class localpeer(repository.peer):
215 '''peer for a local repo; reflects only the most recent API'''
200 '''peer for a local repo; reflects only the most recent API'''
216
201
217 def __init__(self, repo, caps=None):
202 def __init__(self, repo, caps=None):
218 super(localpeer, self).__init__()
203 super(localpeer, self).__init__()
219
204
220 if caps is None:
205 if caps is None:
221 caps = moderncaps.copy()
206 caps = moderncaps.copy()
222 self._repo = repo.filtered('served')
207 self._repo = repo.filtered('served')
223 self.ui = repo.ui
208 self.ui = repo.ui
224 self._caps = repo._restrictcapabilities(caps)
209 self._caps = repo._restrictcapabilities(caps)
225
210
226 # Begin of _basepeer interface.
211 # Begin of _basepeer interface.
227
212
228 def url(self):
213 def url(self):
229 return self._repo.url()
214 return self._repo.url()
230
215
231 def local(self):
216 def local(self):
232 return self._repo
217 return self._repo
233
218
234 def peer(self):
219 def peer(self):
235 return self
220 return self
236
221
237 def canpush(self):
222 def canpush(self):
238 return True
223 return True
239
224
240 def close(self):
225 def close(self):
241 self._repo.close()
226 self._repo.close()
242
227
243 # End of _basepeer interface.
228 # End of _basepeer interface.
244
229
245 # Begin of _basewirecommands interface.
230 # Begin of _basewirecommands interface.
246
231
247 def branchmap(self):
232 def branchmap(self):
248 return self._repo.branchmap()
233 return self._repo.branchmap()
249
234
250 def capabilities(self):
235 def capabilities(self):
251 return self._caps
236 return self._caps
252
237
253 def debugwireargs(self, one, two, three=None, four=None, five=None):
238 def debugwireargs(self, one, two, three=None, four=None, five=None):
254 """Used to test argument passing over the wire"""
239 """Used to test argument passing over the wire"""
255 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
240 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
256 pycompat.bytestr(four),
241 pycompat.bytestr(four),
257 pycompat.bytestr(five))
242 pycompat.bytestr(five))
258
243
259 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
244 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
260 **kwargs):
245 **kwargs):
261 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
246 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
262 common=common, bundlecaps=bundlecaps,
247 common=common, bundlecaps=bundlecaps,
263 **kwargs)[1]
248 **kwargs)[1]
264 cb = util.chunkbuffer(chunks)
249 cb = util.chunkbuffer(chunks)
265
250
266 if exchange.bundle2requested(bundlecaps):
251 if exchange.bundle2requested(bundlecaps):
267 # When requesting a bundle2, getbundle returns a stream to make the
252 # When requesting a bundle2, getbundle returns a stream to make the
268 # wire level function happier. We need to build a proper object
253 # wire level function happier. We need to build a proper object
269 # from it in local peer.
254 # from it in local peer.
270 return bundle2.getunbundler(self.ui, cb)
255 return bundle2.getunbundler(self.ui, cb)
271 else:
256 else:
272 return changegroup.getunbundler('01', cb, None)
257 return changegroup.getunbundler('01', cb, None)
273
258
274 def heads(self):
259 def heads(self):
275 return self._repo.heads()
260 return self._repo.heads()
276
261
277 def known(self, nodes):
262 def known(self, nodes):
278 return self._repo.known(nodes)
263 return self._repo.known(nodes)
279
264
280 def listkeys(self, namespace):
265 def listkeys(self, namespace):
281 return self._repo.listkeys(namespace)
266 return self._repo.listkeys(namespace)
282
267
283 def lookup(self, key):
268 def lookup(self, key):
284 return self._repo.lookup(key)
269 return self._repo.lookup(key)
285
270
286 def pushkey(self, namespace, key, old, new):
271 def pushkey(self, namespace, key, old, new):
287 return self._repo.pushkey(namespace, key, old, new)
272 return self._repo.pushkey(namespace, key, old, new)
288
273
289 def stream_out(self):
274 def stream_out(self):
290 raise error.Abort(_('cannot perform stream clone against local '
275 raise error.Abort(_('cannot perform stream clone against local '
291 'peer'))
276 'peer'))
292
277
293 def unbundle(self, cg, heads, url):
278 def unbundle(self, cg, heads, url):
294 """apply a bundle on a repo
279 """apply a bundle on a repo
295
280
296 This function handles the repo locking itself."""
281 This function handles the repo locking itself."""
297 try:
282 try:
298 try:
283 try:
299 cg = exchange.readbundle(self.ui, cg, None)
284 cg = exchange.readbundle(self.ui, cg, None)
300 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
285 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
301 if util.safehasattr(ret, 'getchunks'):
286 if util.safehasattr(ret, 'getchunks'):
302 # This is a bundle20 object, turn it into an unbundler.
287 # This is a bundle20 object, turn it into an unbundler.
303 # This little dance should be dropped eventually when the
288 # This little dance should be dropped eventually when the
304 # API is finally improved.
289 # API is finally improved.
305 stream = util.chunkbuffer(ret.getchunks())
290 stream = util.chunkbuffer(ret.getchunks())
306 ret = bundle2.getunbundler(self.ui, stream)
291 ret = bundle2.getunbundler(self.ui, stream)
307 return ret
292 return ret
308 except Exception as exc:
293 except Exception as exc:
309 # If the exception contains output salvaged from a bundle2
294 # If the exception contains output salvaged from a bundle2
310 # reply, we need to make sure it is printed before continuing
295 # reply, we need to make sure it is printed before continuing
311 # to fail. So we build a bundle2 with such output and consume
296 # to fail. So we build a bundle2 with such output and consume
312 # it directly.
297 # it directly.
313 #
298 #
314 # This is not very elegant but allows a "simple" solution for
299 # This is not very elegant but allows a "simple" solution for
315 # issue4594
300 # issue4594
316 output = getattr(exc, '_bundle2salvagedoutput', ())
301 output = getattr(exc, '_bundle2salvagedoutput', ())
317 if output:
302 if output:
318 bundler = bundle2.bundle20(self._repo.ui)
303 bundler = bundle2.bundle20(self._repo.ui)
319 for out in output:
304 for out in output:
320 bundler.addpart(out)
305 bundler.addpart(out)
321 stream = util.chunkbuffer(bundler.getchunks())
306 stream = util.chunkbuffer(bundler.getchunks())
322 b = bundle2.getunbundler(self.ui, stream)
307 b = bundle2.getunbundler(self.ui, stream)
323 bundle2.processbundle(self._repo, b)
308 bundle2.processbundle(self._repo, b)
324 raise
309 raise
325 except error.PushRaced as exc:
310 except error.PushRaced as exc:
326 raise error.ResponseError(_('push failed:'),
311 raise error.ResponseError(_('push failed:'),
327 stringutil.forcebytestr(exc))
312 stringutil.forcebytestr(exc))
328
313
329 # End of _basewirecommands interface.
314 # End of _basewirecommands interface.
330
315
331 # Begin of peer interface.
316 # Begin of peer interface.
332
317
333 def commandexecutor(self):
318 def commandexecutor(self):
334 return localcommandexecutor(self)
319 return localcommandexecutor(self)
335
320
336 def iterbatch(self):
337 return localiterbatcher(self)
338
339 # End of peer interface.
321 # End of peer interface.
340
322
341 class locallegacypeer(repository.legacypeer, localpeer):
323 class locallegacypeer(repository.legacypeer, localpeer):
342 '''peer extension which implements legacy methods too; used for tests with
324 '''peer extension which implements legacy methods too; used for tests with
343 restricted capabilities'''
325 restricted capabilities'''
344
326
345 def __init__(self, repo):
327 def __init__(self, repo):
346 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
328 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
347
329
348 # Begin of baselegacywirecommands interface.
330 # Begin of baselegacywirecommands interface.
349
331
350 def between(self, pairs):
332 def between(self, pairs):
351 return self._repo.between(pairs)
333 return self._repo.between(pairs)
352
334
353 def branches(self, nodes):
335 def branches(self, nodes):
354 return self._repo.branches(nodes)
336 return self._repo.branches(nodes)
355
337
356 def changegroup(self, basenodes, source):
338 def changegroup(self, basenodes, source):
357 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
339 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
358 missingheads=self._repo.heads())
340 missingheads=self._repo.heads())
359 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
341 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
360
342
361 def changegroupsubset(self, bases, heads, source):
343 def changegroupsubset(self, bases, heads, source):
362 outgoing = discovery.outgoing(self._repo, missingroots=bases,
344 outgoing = discovery.outgoing(self._repo, missingroots=bases,
363 missingheads=heads)
345 missingheads=heads)
364 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
346 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
365
347
366 # End of baselegacywirecommands interface.
348 # End of baselegacywirecommands interface.
367
349
368 # Increment the sub-version when the revlog v2 format changes to lock out old
350 # Increment the sub-version when the revlog v2 format changes to lock out old
369 # clients.
351 # clients.
370 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
352 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
371
353
372 # Functions receiving (ui, features) that extensions can register to impact
354 # Functions receiving (ui, features) that extensions can register to impact
373 # the ability to load repositories with custom requirements. Only
355 # the ability to load repositories with custom requirements. Only
374 # functions defined in loaded extensions are called.
356 # functions defined in loaded extensions are called.
375 #
357 #
376 # The function receives a set of requirement strings that the repository
358 # The function receives a set of requirement strings that the repository
377 # is capable of opening. Functions will typically add elements to the
359 # is capable of opening. Functions will typically add elements to the
378 # set to reflect that the extension knows how to handle that requirements.
360 # set to reflect that the extension knows how to handle that requirements.
379 featuresetupfuncs = set()
361 featuresetupfuncs = set()
380
362
381 @zi.implementer(repository.completelocalrepository)
363 @zi.implementer(repository.completelocalrepository)
382 class localrepository(object):
364 class localrepository(object):
383
365
384 # obsolete experimental requirements:
366 # obsolete experimental requirements:
385 # - manifestv2: An experimental new manifest format that allowed
367 # - manifestv2: An experimental new manifest format that allowed
386 # for stem compression of long paths. Experiment ended up not
368 # for stem compression of long paths. Experiment ended up not
387 # being successful (repository sizes went up due to worse delta
369 # being successful (repository sizes went up due to worse delta
388 # chains), and the code was deleted in 4.6.
370 # chains), and the code was deleted in 4.6.
389 supportedformats = {
371 supportedformats = {
390 'revlogv1',
372 'revlogv1',
391 'generaldelta',
373 'generaldelta',
392 'treemanifest',
374 'treemanifest',
393 REVLOGV2_REQUIREMENT,
375 REVLOGV2_REQUIREMENT,
394 }
376 }
395 _basesupported = supportedformats | {
377 _basesupported = supportedformats | {
396 'store',
378 'store',
397 'fncache',
379 'fncache',
398 'shared',
380 'shared',
399 'relshared',
381 'relshared',
400 'dotencode',
382 'dotencode',
401 'exp-sparse',
383 'exp-sparse',
402 }
384 }
403 openerreqs = {
385 openerreqs = {
404 'revlogv1',
386 'revlogv1',
405 'generaldelta',
387 'generaldelta',
406 'treemanifest',
388 'treemanifest',
407 }
389 }
408
390
409 # list of prefix for file which can be written without 'wlock'
391 # list of prefix for file which can be written without 'wlock'
410 # Extensions should extend this list when needed
392 # Extensions should extend this list when needed
411 _wlockfreeprefix = {
393 _wlockfreeprefix = {
412 # We migh consider requiring 'wlock' for the next
394 # We migh consider requiring 'wlock' for the next
413 # two, but pretty much all the existing code assume
395 # two, but pretty much all the existing code assume
414 # wlock is not needed so we keep them excluded for
396 # wlock is not needed so we keep them excluded for
415 # now.
397 # now.
416 'hgrc',
398 'hgrc',
417 'requires',
399 'requires',
418 # XXX cache is a complicatged business someone
400 # XXX cache is a complicatged business someone
419 # should investigate this in depth at some point
401 # should investigate this in depth at some point
420 'cache/',
402 'cache/',
421 # XXX shouldn't be dirstate covered by the wlock?
403 # XXX shouldn't be dirstate covered by the wlock?
422 'dirstate',
404 'dirstate',
423 # XXX bisect was still a bit too messy at the time
405 # XXX bisect was still a bit too messy at the time
424 # this changeset was introduced. Someone should fix
406 # this changeset was introduced. Someone should fix
425 # the remainig bit and drop this line
407 # the remainig bit and drop this line
426 'bisect.state',
408 'bisect.state',
427 }
409 }
428
410
429 def __init__(self, baseui, path, create=False):
411 def __init__(self, baseui, path, create=False):
430 self.requirements = set()
412 self.requirements = set()
431 self.filtername = None
413 self.filtername = None
432 # wvfs: rooted at the repository root, used to access the working copy
414 # wvfs: rooted at the repository root, used to access the working copy
433 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
415 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
434 # vfs: rooted at .hg, used to access repo files outside of .hg/store
416 # vfs: rooted at .hg, used to access repo files outside of .hg/store
435 self.vfs = None
417 self.vfs = None
436 # svfs: usually rooted at .hg/store, used to access repository history
418 # svfs: usually rooted at .hg/store, used to access repository history
437 # If this is a shared repository, this vfs may point to another
419 # If this is a shared repository, this vfs may point to another
438 # repository's .hg/store directory.
420 # repository's .hg/store directory.
439 self.svfs = None
421 self.svfs = None
440 self.root = self.wvfs.base
422 self.root = self.wvfs.base
441 self.path = self.wvfs.join(".hg")
423 self.path = self.wvfs.join(".hg")
442 self.origroot = path
424 self.origroot = path
443 # This is only used by context.workingctx.match in order to
425 # This is only used by context.workingctx.match in order to
444 # detect files in subrepos.
426 # detect files in subrepos.
445 self.auditor = pathutil.pathauditor(
427 self.auditor = pathutil.pathauditor(
446 self.root, callback=self._checknested)
428 self.root, callback=self._checknested)
447 # This is only used by context.basectx.match in order to detect
429 # This is only used by context.basectx.match in order to detect
448 # files in subrepos.
430 # files in subrepos.
449 self.nofsauditor = pathutil.pathauditor(
431 self.nofsauditor = pathutil.pathauditor(
450 self.root, callback=self._checknested, realfs=False, cached=True)
432 self.root, callback=self._checknested, realfs=False, cached=True)
451 self.baseui = baseui
433 self.baseui = baseui
452 self.ui = baseui.copy()
434 self.ui = baseui.copy()
453 self.ui.copy = baseui.copy # prevent copying repo configuration
435 self.ui.copy = baseui.copy # prevent copying repo configuration
454 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
436 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
455 if (self.ui.configbool('devel', 'all-warnings') or
437 if (self.ui.configbool('devel', 'all-warnings') or
456 self.ui.configbool('devel', 'check-locks')):
438 self.ui.configbool('devel', 'check-locks')):
457 self.vfs.audit = self._getvfsward(self.vfs.audit)
439 self.vfs.audit = self._getvfsward(self.vfs.audit)
458 # A list of callback to shape the phase if no data were found.
440 # A list of callback to shape the phase if no data were found.
459 # Callback are in the form: func(repo, roots) --> processed root.
441 # Callback are in the form: func(repo, roots) --> processed root.
460 # This list it to be filled by extension during repo setup
442 # This list it to be filled by extension during repo setup
461 self._phasedefaults = []
443 self._phasedefaults = []
462 try:
444 try:
463 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
445 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
464 self._loadextensions()
446 self._loadextensions()
465 except IOError:
447 except IOError:
466 pass
448 pass
467
449
468 if featuresetupfuncs:
450 if featuresetupfuncs:
469 self.supported = set(self._basesupported) # use private copy
451 self.supported = set(self._basesupported) # use private copy
470 extmods = set(m.__name__ for n, m
452 extmods = set(m.__name__ for n, m
471 in extensions.extensions(self.ui))
453 in extensions.extensions(self.ui))
472 for setupfunc in featuresetupfuncs:
454 for setupfunc in featuresetupfuncs:
473 if setupfunc.__module__ in extmods:
455 if setupfunc.__module__ in extmods:
474 setupfunc(self.ui, self.supported)
456 setupfunc(self.ui, self.supported)
475 else:
457 else:
476 self.supported = self._basesupported
458 self.supported = self._basesupported
477 color.setup(self.ui)
459 color.setup(self.ui)
478
460
479 # Add compression engines.
461 # Add compression engines.
480 for name in util.compengines:
462 for name in util.compengines:
481 engine = util.compengines[name]
463 engine = util.compengines[name]
482 if engine.revlogheader():
464 if engine.revlogheader():
483 self.supported.add('exp-compression-%s' % name)
465 self.supported.add('exp-compression-%s' % name)
484
466
485 if not self.vfs.isdir():
467 if not self.vfs.isdir():
486 if create:
468 if create:
487 self.requirements = newreporequirements(self)
469 self.requirements = newreporequirements(self)
488
470
489 if not self.wvfs.exists():
471 if not self.wvfs.exists():
490 self.wvfs.makedirs()
472 self.wvfs.makedirs()
491 self.vfs.makedir(notindexed=True)
473 self.vfs.makedir(notindexed=True)
492
474
493 if 'store' in self.requirements:
475 if 'store' in self.requirements:
494 self.vfs.mkdir("store")
476 self.vfs.mkdir("store")
495
477
496 # create an invalid changelog
478 # create an invalid changelog
497 self.vfs.append(
479 self.vfs.append(
498 "00changelog.i",
480 "00changelog.i",
499 '\0\0\0\2' # represents revlogv2
481 '\0\0\0\2' # represents revlogv2
500 ' dummy changelog to prevent using the old repo layout'
482 ' dummy changelog to prevent using the old repo layout'
501 )
483 )
502 else:
484 else:
503 raise error.RepoError(_("repository %s not found") % path)
485 raise error.RepoError(_("repository %s not found") % path)
504 elif create:
486 elif create:
505 raise error.RepoError(_("repository %s already exists") % path)
487 raise error.RepoError(_("repository %s already exists") % path)
506 else:
488 else:
507 try:
489 try:
508 self.requirements = scmutil.readrequires(
490 self.requirements = scmutil.readrequires(
509 self.vfs, self.supported)
491 self.vfs, self.supported)
510 except IOError as inst:
492 except IOError as inst:
511 if inst.errno != errno.ENOENT:
493 if inst.errno != errno.ENOENT:
512 raise
494 raise
513
495
514 cachepath = self.vfs.join('cache')
496 cachepath = self.vfs.join('cache')
515 self.sharedpath = self.path
497 self.sharedpath = self.path
516 try:
498 try:
517 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
499 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
518 if 'relshared' in self.requirements:
500 if 'relshared' in self.requirements:
519 sharedpath = self.vfs.join(sharedpath)
501 sharedpath = self.vfs.join(sharedpath)
520 vfs = vfsmod.vfs(sharedpath, realpath=True)
502 vfs = vfsmod.vfs(sharedpath, realpath=True)
521 cachepath = vfs.join('cache')
503 cachepath = vfs.join('cache')
522 s = vfs.base
504 s = vfs.base
523 if not vfs.exists():
505 if not vfs.exists():
524 raise error.RepoError(
506 raise error.RepoError(
525 _('.hg/sharedpath points to nonexistent directory %s') % s)
507 _('.hg/sharedpath points to nonexistent directory %s') % s)
526 self.sharedpath = s
508 self.sharedpath = s
527 except IOError as inst:
509 except IOError as inst:
528 if inst.errno != errno.ENOENT:
510 if inst.errno != errno.ENOENT:
529 raise
511 raise
530
512
531 if 'exp-sparse' in self.requirements and not sparse.enabled:
513 if 'exp-sparse' in self.requirements and not sparse.enabled:
532 raise error.RepoError(_('repository is using sparse feature but '
514 raise error.RepoError(_('repository is using sparse feature but '
533 'sparse is not enabled; enable the '
515 'sparse is not enabled; enable the '
534 '"sparse" extensions to access'))
516 '"sparse" extensions to access'))
535
517
536 self.store = store.store(
518 self.store = store.store(
537 self.requirements, self.sharedpath,
519 self.requirements, self.sharedpath,
538 lambda base: vfsmod.vfs(base, cacheaudited=True))
520 lambda base: vfsmod.vfs(base, cacheaudited=True))
539 self.spath = self.store.path
521 self.spath = self.store.path
540 self.svfs = self.store.vfs
522 self.svfs = self.store.vfs
541 self.sjoin = self.store.join
523 self.sjoin = self.store.join
542 self.vfs.createmode = self.store.createmode
524 self.vfs.createmode = self.store.createmode
543 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
525 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
544 self.cachevfs.createmode = self.store.createmode
526 self.cachevfs.createmode = self.store.createmode
545 if (self.ui.configbool('devel', 'all-warnings') or
527 if (self.ui.configbool('devel', 'all-warnings') or
546 self.ui.configbool('devel', 'check-locks')):
528 self.ui.configbool('devel', 'check-locks')):
547 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
529 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
548 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
530 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
549 else: # standard vfs
531 else: # standard vfs
550 self.svfs.audit = self._getsvfsward(self.svfs.audit)
532 self.svfs.audit = self._getsvfsward(self.svfs.audit)
551 self._applyopenerreqs()
533 self._applyopenerreqs()
552 if create:
534 if create:
553 self._writerequirements()
535 self._writerequirements()
554
536
555 self._dirstatevalidatewarned = False
537 self._dirstatevalidatewarned = False
556
538
557 self._branchcaches = {}
539 self._branchcaches = {}
558 self._revbranchcache = None
540 self._revbranchcache = None
559 self._filterpats = {}
541 self._filterpats = {}
560 self._datafilters = {}
542 self._datafilters = {}
561 self._transref = self._lockref = self._wlockref = None
543 self._transref = self._lockref = self._wlockref = None
562
544
563 # A cache for various files under .hg/ that tracks file changes,
545 # A cache for various files under .hg/ that tracks file changes,
564 # (used by the filecache decorator)
546 # (used by the filecache decorator)
565 #
547 #
566 # Maps a property name to its util.filecacheentry
548 # Maps a property name to its util.filecacheentry
567 self._filecache = {}
549 self._filecache = {}
568
550
569 # hold sets of revision to be filtered
551 # hold sets of revision to be filtered
570 # should be cleared when something might have changed the filter value:
552 # should be cleared when something might have changed the filter value:
571 # - new changesets,
553 # - new changesets,
572 # - phase change,
554 # - phase change,
573 # - new obsolescence marker,
555 # - new obsolescence marker,
574 # - working directory parent change,
556 # - working directory parent change,
575 # - bookmark changes
557 # - bookmark changes
576 self.filteredrevcache = {}
558 self.filteredrevcache = {}
577
559
578 # post-dirstate-status hooks
560 # post-dirstate-status hooks
579 self._postdsstatus = []
561 self._postdsstatus = []
580
562
581 # generic mapping between names and nodes
563 # generic mapping between names and nodes
582 self.names = namespaces.namespaces()
564 self.names = namespaces.namespaces()
583
565
584 # Key to signature value.
566 # Key to signature value.
585 self._sparsesignaturecache = {}
567 self._sparsesignaturecache = {}
586 # Signature to cached matcher instance.
568 # Signature to cached matcher instance.
587 self._sparsematchercache = {}
569 self._sparsematchercache = {}
588
570
589 def _getvfsward(self, origfunc):
571 def _getvfsward(self, origfunc):
590 """build a ward for self.vfs"""
572 """build a ward for self.vfs"""
591 rref = weakref.ref(self)
573 rref = weakref.ref(self)
592 def checkvfs(path, mode=None):
574 def checkvfs(path, mode=None):
593 ret = origfunc(path, mode=mode)
575 ret = origfunc(path, mode=mode)
594 repo = rref()
576 repo = rref()
595 if (repo is None
577 if (repo is None
596 or not util.safehasattr(repo, '_wlockref')
578 or not util.safehasattr(repo, '_wlockref')
597 or not util.safehasattr(repo, '_lockref')):
579 or not util.safehasattr(repo, '_lockref')):
598 return
580 return
599 if mode in (None, 'r', 'rb'):
581 if mode in (None, 'r', 'rb'):
600 return
582 return
601 if path.startswith(repo.path):
583 if path.startswith(repo.path):
602 # truncate name relative to the repository (.hg)
584 # truncate name relative to the repository (.hg)
603 path = path[len(repo.path) + 1:]
585 path = path[len(repo.path) + 1:]
604 if path.startswith('cache/'):
586 if path.startswith('cache/'):
605 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
587 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
606 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
588 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
607 if path.startswith('journal.'):
589 if path.startswith('journal.'):
608 # journal is covered by 'lock'
590 # journal is covered by 'lock'
609 if repo._currentlock(repo._lockref) is None:
591 if repo._currentlock(repo._lockref) is None:
610 repo.ui.develwarn('write with no lock: "%s"' % path,
592 repo.ui.develwarn('write with no lock: "%s"' % path,
611 stacklevel=2, config='check-locks')
593 stacklevel=2, config='check-locks')
612 elif repo._currentlock(repo._wlockref) is None:
594 elif repo._currentlock(repo._wlockref) is None:
613 # rest of vfs files are covered by 'wlock'
595 # rest of vfs files are covered by 'wlock'
614 #
596 #
615 # exclude special files
597 # exclude special files
616 for prefix in self._wlockfreeprefix:
598 for prefix in self._wlockfreeprefix:
617 if path.startswith(prefix):
599 if path.startswith(prefix):
618 return
600 return
619 repo.ui.develwarn('write with no wlock: "%s"' % path,
601 repo.ui.develwarn('write with no wlock: "%s"' % path,
620 stacklevel=2, config='check-locks')
602 stacklevel=2, config='check-locks')
621 return ret
603 return ret
622 return checkvfs
604 return checkvfs
623
605
624 def _getsvfsward(self, origfunc):
606 def _getsvfsward(self, origfunc):
625 """build a ward for self.svfs"""
607 """build a ward for self.svfs"""
626 rref = weakref.ref(self)
608 rref = weakref.ref(self)
627 def checksvfs(path, mode=None):
609 def checksvfs(path, mode=None):
628 ret = origfunc(path, mode=mode)
610 ret = origfunc(path, mode=mode)
629 repo = rref()
611 repo = rref()
630 if repo is None or not util.safehasattr(repo, '_lockref'):
612 if repo is None or not util.safehasattr(repo, '_lockref'):
631 return
613 return
632 if mode in (None, 'r', 'rb'):
614 if mode in (None, 'r', 'rb'):
633 return
615 return
634 if path.startswith(repo.sharedpath):
616 if path.startswith(repo.sharedpath):
635 # truncate name relative to the repository (.hg)
617 # truncate name relative to the repository (.hg)
636 path = path[len(repo.sharedpath) + 1:]
618 path = path[len(repo.sharedpath) + 1:]
637 if repo._currentlock(repo._lockref) is None:
619 if repo._currentlock(repo._lockref) is None:
638 repo.ui.develwarn('write with no lock: "%s"' % path,
620 repo.ui.develwarn('write with no lock: "%s"' % path,
639 stacklevel=3)
621 stacklevel=3)
640 return ret
622 return ret
641 return checksvfs
623 return checksvfs
642
624
643 def close(self):
625 def close(self):
644 self._writecaches()
626 self._writecaches()
645
627
646 def _loadextensions(self):
628 def _loadextensions(self):
647 extensions.loadall(self.ui)
629 extensions.loadall(self.ui)
648
630
649 def _writecaches(self):
631 def _writecaches(self):
650 if self._revbranchcache:
632 if self._revbranchcache:
651 self._revbranchcache.write()
633 self._revbranchcache.write()
652
634
653 def _restrictcapabilities(self, caps):
635 def _restrictcapabilities(self, caps):
654 if self.ui.configbool('experimental', 'bundle2-advertise'):
636 if self.ui.configbool('experimental', 'bundle2-advertise'):
655 caps = set(caps)
637 caps = set(caps)
656 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
638 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
657 role='client'))
639 role='client'))
658 caps.add('bundle2=' + urlreq.quote(capsblob))
640 caps.add('bundle2=' + urlreq.quote(capsblob))
659 return caps
641 return caps
660
642
661 def _applyopenerreqs(self):
643 def _applyopenerreqs(self):
662 self.svfs.options = dict((r, 1) for r in self.requirements
644 self.svfs.options = dict((r, 1) for r in self.requirements
663 if r in self.openerreqs)
645 if r in self.openerreqs)
664 # experimental config: format.chunkcachesize
646 # experimental config: format.chunkcachesize
665 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
647 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
666 if chunkcachesize is not None:
648 if chunkcachesize is not None:
667 self.svfs.options['chunkcachesize'] = chunkcachesize
649 self.svfs.options['chunkcachesize'] = chunkcachesize
668 # experimental config: format.maxchainlen
650 # experimental config: format.maxchainlen
669 maxchainlen = self.ui.configint('format', 'maxchainlen')
651 maxchainlen = self.ui.configint('format', 'maxchainlen')
670 if maxchainlen is not None:
652 if maxchainlen is not None:
671 self.svfs.options['maxchainlen'] = maxchainlen
653 self.svfs.options['maxchainlen'] = maxchainlen
672 # experimental config: format.manifestcachesize
654 # experimental config: format.manifestcachesize
673 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
655 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
674 if manifestcachesize is not None:
656 if manifestcachesize is not None:
675 self.svfs.options['manifestcachesize'] = manifestcachesize
657 self.svfs.options['manifestcachesize'] = manifestcachesize
676 # experimental config: format.aggressivemergedeltas
658 # experimental config: format.aggressivemergedeltas
677 aggressivemergedeltas = self.ui.configbool('format',
659 aggressivemergedeltas = self.ui.configbool('format',
678 'aggressivemergedeltas')
660 'aggressivemergedeltas')
679 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
661 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
680 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
662 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
681 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
663 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
682 if 0 <= chainspan:
664 if 0 <= chainspan:
683 self.svfs.options['maxdeltachainspan'] = chainspan
665 self.svfs.options['maxdeltachainspan'] = chainspan
684 mmapindexthreshold = self.ui.configbytes('experimental',
666 mmapindexthreshold = self.ui.configbytes('experimental',
685 'mmapindexthreshold')
667 'mmapindexthreshold')
686 if mmapindexthreshold is not None:
668 if mmapindexthreshold is not None:
687 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
669 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
688 withsparseread = self.ui.configbool('experimental', 'sparse-read')
670 withsparseread = self.ui.configbool('experimental', 'sparse-read')
689 srdensitythres = float(self.ui.config('experimental',
671 srdensitythres = float(self.ui.config('experimental',
690 'sparse-read.density-threshold'))
672 'sparse-read.density-threshold'))
691 srmingapsize = self.ui.configbytes('experimental',
673 srmingapsize = self.ui.configbytes('experimental',
692 'sparse-read.min-gap-size')
674 'sparse-read.min-gap-size')
693 self.svfs.options['with-sparse-read'] = withsparseread
675 self.svfs.options['with-sparse-read'] = withsparseread
694 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
676 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
695 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
677 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
696
678
697 for r in self.requirements:
679 for r in self.requirements:
698 if r.startswith('exp-compression-'):
680 if r.startswith('exp-compression-'):
699 self.svfs.options['compengine'] = r[len('exp-compression-'):]
681 self.svfs.options['compengine'] = r[len('exp-compression-'):]
700
682
701 # TODO move "revlogv2" to openerreqs once finalized.
683 # TODO move "revlogv2" to openerreqs once finalized.
702 if REVLOGV2_REQUIREMENT in self.requirements:
684 if REVLOGV2_REQUIREMENT in self.requirements:
703 self.svfs.options['revlogv2'] = True
685 self.svfs.options['revlogv2'] = True
704
686
705 def _writerequirements(self):
687 def _writerequirements(self):
706 scmutil.writerequires(self.vfs, self.requirements)
688 scmutil.writerequires(self.vfs, self.requirements)
707
689
708 def _checknested(self, path):
690 def _checknested(self, path):
709 """Determine if path is a legal nested repository."""
691 """Determine if path is a legal nested repository."""
710 if not path.startswith(self.root):
692 if not path.startswith(self.root):
711 return False
693 return False
712 subpath = path[len(self.root) + 1:]
694 subpath = path[len(self.root) + 1:]
713 normsubpath = util.pconvert(subpath)
695 normsubpath = util.pconvert(subpath)
714
696
715 # XXX: Checking against the current working copy is wrong in
697 # XXX: Checking against the current working copy is wrong in
716 # the sense that it can reject things like
698 # the sense that it can reject things like
717 #
699 #
718 # $ hg cat -r 10 sub/x.txt
700 # $ hg cat -r 10 sub/x.txt
719 #
701 #
720 # if sub/ is no longer a subrepository in the working copy
702 # if sub/ is no longer a subrepository in the working copy
721 # parent revision.
703 # parent revision.
722 #
704 #
723 # However, it can of course also allow things that would have
705 # However, it can of course also allow things that would have
724 # been rejected before, such as the above cat command if sub/
706 # been rejected before, such as the above cat command if sub/
725 # is a subrepository now, but was a normal directory before.
707 # is a subrepository now, but was a normal directory before.
726 # The old path auditor would have rejected by mistake since it
708 # The old path auditor would have rejected by mistake since it
727 # panics when it sees sub/.hg/.
709 # panics when it sees sub/.hg/.
728 #
710 #
729 # All in all, checking against the working copy seems sensible
711 # All in all, checking against the working copy seems sensible
730 # since we want to prevent access to nested repositories on
712 # since we want to prevent access to nested repositories on
731 # the filesystem *now*.
713 # the filesystem *now*.
732 ctx = self[None]
714 ctx = self[None]
733 parts = util.splitpath(subpath)
715 parts = util.splitpath(subpath)
734 while parts:
716 while parts:
735 prefix = '/'.join(parts)
717 prefix = '/'.join(parts)
736 if prefix in ctx.substate:
718 if prefix in ctx.substate:
737 if prefix == normsubpath:
719 if prefix == normsubpath:
738 return True
720 return True
739 else:
721 else:
740 sub = ctx.sub(prefix)
722 sub = ctx.sub(prefix)
741 return sub.checknested(subpath[len(prefix) + 1:])
723 return sub.checknested(subpath[len(prefix) + 1:])
742 else:
724 else:
743 parts.pop()
725 parts.pop()
744 return False
726 return False
745
727
746 def peer(self):
728 def peer(self):
747 return localpeer(self) # not cached to avoid reference cycle
729 return localpeer(self) # not cached to avoid reference cycle
748
730
749 def unfiltered(self):
731 def unfiltered(self):
750 """Return unfiltered version of the repository
732 """Return unfiltered version of the repository
751
733
752 Intended to be overwritten by filtered repo."""
734 Intended to be overwritten by filtered repo."""
753 return self
735 return self
754
736
755 def filtered(self, name, visibilityexceptions=None):
737 def filtered(self, name, visibilityexceptions=None):
756 """Return a filtered version of a repository"""
738 """Return a filtered version of a repository"""
757 cls = repoview.newtype(self.unfiltered().__class__)
739 cls = repoview.newtype(self.unfiltered().__class__)
758 return cls(self, name, visibilityexceptions)
740 return cls(self, name, visibilityexceptions)
759
741
760 @repofilecache('bookmarks', 'bookmarks.current')
742 @repofilecache('bookmarks', 'bookmarks.current')
761 def _bookmarks(self):
743 def _bookmarks(self):
762 return bookmarks.bmstore(self)
744 return bookmarks.bmstore(self)
763
745
764 @property
746 @property
765 def _activebookmark(self):
747 def _activebookmark(self):
766 return self._bookmarks.active
748 return self._bookmarks.active
767
749
768 # _phasesets depend on changelog. what we need is to call
750 # _phasesets depend on changelog. what we need is to call
769 # _phasecache.invalidate() if '00changelog.i' was changed, but it
751 # _phasecache.invalidate() if '00changelog.i' was changed, but it
770 # can't be easily expressed in filecache mechanism.
752 # can't be easily expressed in filecache mechanism.
771 @storecache('phaseroots', '00changelog.i')
753 @storecache('phaseroots', '00changelog.i')
772 def _phasecache(self):
754 def _phasecache(self):
773 return phases.phasecache(self, self._phasedefaults)
755 return phases.phasecache(self, self._phasedefaults)
774
756
775 @storecache('obsstore')
757 @storecache('obsstore')
776 def obsstore(self):
758 def obsstore(self):
777 return obsolete.makestore(self.ui, self)
759 return obsolete.makestore(self.ui, self)
778
760
779 @storecache('00changelog.i')
761 @storecache('00changelog.i')
780 def changelog(self):
762 def changelog(self):
781 return changelog.changelog(self.svfs,
763 return changelog.changelog(self.svfs,
782 trypending=txnutil.mayhavepending(self.root))
764 trypending=txnutil.mayhavepending(self.root))
783
765
784 def _constructmanifest(self):
766 def _constructmanifest(self):
785 # This is a temporary function while we migrate from manifest to
767 # This is a temporary function while we migrate from manifest to
786 # manifestlog. It allows bundlerepo and unionrepo to intercept the
768 # manifestlog. It allows bundlerepo and unionrepo to intercept the
787 # manifest creation.
769 # manifest creation.
788 return manifest.manifestrevlog(self.svfs)
770 return manifest.manifestrevlog(self.svfs)
789
771
790 @storecache('00manifest.i')
772 @storecache('00manifest.i')
791 def manifestlog(self):
773 def manifestlog(self):
792 return manifest.manifestlog(self.svfs, self)
774 return manifest.manifestlog(self.svfs, self)
793
775
794 @repofilecache('dirstate')
776 @repofilecache('dirstate')
795 def dirstate(self):
777 def dirstate(self):
796 sparsematchfn = lambda: sparse.matcher(self)
778 sparsematchfn = lambda: sparse.matcher(self)
797
779
798 return dirstate.dirstate(self.vfs, self.ui, self.root,
780 return dirstate.dirstate(self.vfs, self.ui, self.root,
799 self._dirstatevalidate, sparsematchfn)
781 self._dirstatevalidate, sparsematchfn)
800
782
801 def _dirstatevalidate(self, node):
783 def _dirstatevalidate(self, node):
802 try:
784 try:
803 self.changelog.rev(node)
785 self.changelog.rev(node)
804 return node
786 return node
805 except error.LookupError:
787 except error.LookupError:
806 if not self._dirstatevalidatewarned:
788 if not self._dirstatevalidatewarned:
807 self._dirstatevalidatewarned = True
789 self._dirstatevalidatewarned = True
808 self.ui.warn(_("warning: ignoring unknown"
790 self.ui.warn(_("warning: ignoring unknown"
809 " working parent %s!\n") % short(node))
791 " working parent %s!\n") % short(node))
810 return nullid
792 return nullid
811
793
812 @repofilecache(narrowspec.FILENAME)
794 @repofilecache(narrowspec.FILENAME)
813 def narrowpats(self):
795 def narrowpats(self):
814 """matcher patterns for this repository's narrowspec
796 """matcher patterns for this repository's narrowspec
815
797
816 A tuple of (includes, excludes).
798 A tuple of (includes, excludes).
817 """
799 """
818 source = self
800 source = self
819 if self.shared():
801 if self.shared():
820 from . import hg
802 from . import hg
821 source = hg.sharedreposource(self)
803 source = hg.sharedreposource(self)
822 return narrowspec.load(source)
804 return narrowspec.load(source)
823
805
824 @repofilecache(narrowspec.FILENAME)
806 @repofilecache(narrowspec.FILENAME)
825 def _narrowmatch(self):
807 def _narrowmatch(self):
826 if changegroup.NARROW_REQUIREMENT not in self.requirements:
808 if changegroup.NARROW_REQUIREMENT not in self.requirements:
827 return matchmod.always(self.root, '')
809 return matchmod.always(self.root, '')
828 include, exclude = self.narrowpats
810 include, exclude = self.narrowpats
829 return narrowspec.match(self.root, include=include, exclude=exclude)
811 return narrowspec.match(self.root, include=include, exclude=exclude)
830
812
831 # TODO(martinvonz): make this property-like instead?
813 # TODO(martinvonz): make this property-like instead?
832 def narrowmatch(self):
814 def narrowmatch(self):
833 return self._narrowmatch
815 return self._narrowmatch
834
816
835 def setnarrowpats(self, newincludes, newexcludes):
817 def setnarrowpats(self, newincludes, newexcludes):
836 target = self
818 target = self
837 if self.shared():
819 if self.shared():
838 from . import hg
820 from . import hg
839 target = hg.sharedreposource(self)
821 target = hg.sharedreposource(self)
840 narrowspec.save(target, newincludes, newexcludes)
822 narrowspec.save(target, newincludes, newexcludes)
841 self.invalidate(clearfilecache=True)
823 self.invalidate(clearfilecache=True)
842
824
843 def __getitem__(self, changeid):
825 def __getitem__(self, changeid):
844 if changeid is None:
826 if changeid is None:
845 return context.workingctx(self)
827 return context.workingctx(self)
846 if isinstance(changeid, context.basectx):
828 if isinstance(changeid, context.basectx):
847 return changeid
829 return changeid
848 if isinstance(changeid, slice):
830 if isinstance(changeid, slice):
849 # wdirrev isn't contiguous so the slice shouldn't include it
831 # wdirrev isn't contiguous so the slice shouldn't include it
850 return [context.changectx(self, i)
832 return [context.changectx(self, i)
851 for i in xrange(*changeid.indices(len(self)))
833 for i in xrange(*changeid.indices(len(self)))
852 if i not in self.changelog.filteredrevs]
834 if i not in self.changelog.filteredrevs]
853 try:
835 try:
854 return context.changectx(self, changeid)
836 return context.changectx(self, changeid)
855 except error.WdirUnsupported:
837 except error.WdirUnsupported:
856 return context.workingctx(self)
838 return context.workingctx(self)
857
839
858 def __contains__(self, changeid):
840 def __contains__(self, changeid):
859 """True if the given changeid exists
841 """True if the given changeid exists
860
842
861 error.LookupError is raised if an ambiguous node specified.
843 error.LookupError is raised if an ambiguous node specified.
862 """
844 """
863 try:
845 try:
864 self[changeid]
846 self[changeid]
865 return True
847 return True
866 except (error.RepoLookupError, error.FilteredIndexError,
848 except (error.RepoLookupError, error.FilteredIndexError,
867 error.FilteredLookupError):
849 error.FilteredLookupError):
868 return False
850 return False
869
851
870 def __nonzero__(self):
852 def __nonzero__(self):
871 return True
853 return True
872
854
873 __bool__ = __nonzero__
855 __bool__ = __nonzero__
874
856
875 def __len__(self):
857 def __len__(self):
876 # no need to pay the cost of repoview.changelog
858 # no need to pay the cost of repoview.changelog
877 unfi = self.unfiltered()
859 unfi = self.unfiltered()
878 return len(unfi.changelog)
860 return len(unfi.changelog)
879
861
880 def __iter__(self):
862 def __iter__(self):
881 return iter(self.changelog)
863 return iter(self.changelog)
882
864
883 def revs(self, expr, *args):
865 def revs(self, expr, *args):
884 '''Find revisions matching a revset.
866 '''Find revisions matching a revset.
885
867
886 The revset is specified as a string ``expr`` that may contain
868 The revset is specified as a string ``expr`` that may contain
887 %-formatting to escape certain types. See ``revsetlang.formatspec``.
869 %-formatting to escape certain types. See ``revsetlang.formatspec``.
888
870
889 Revset aliases from the configuration are not expanded. To expand
871 Revset aliases from the configuration are not expanded. To expand
890 user aliases, consider calling ``scmutil.revrange()`` or
872 user aliases, consider calling ``scmutil.revrange()`` or
891 ``repo.anyrevs([expr], user=True)``.
873 ``repo.anyrevs([expr], user=True)``.
892
874
893 Returns a revset.abstractsmartset, which is a list-like interface
875 Returns a revset.abstractsmartset, which is a list-like interface
894 that contains integer revisions.
876 that contains integer revisions.
895 '''
877 '''
896 expr = revsetlang.formatspec(expr, *args)
878 expr = revsetlang.formatspec(expr, *args)
897 m = revset.match(None, expr)
879 m = revset.match(None, expr)
898 return m(self)
880 return m(self)
899
881
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

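    # Illustrative sketch (not part of the original source): ``set()`` yields
    # changectx objects rather than bare revision numbers; ``rev`` below is a
    # hypothetical integer revision::
    #
    #   for ctx in repo.set('parents(%d)', rev):
    #       ui.write('%s\n' % ctx.hex())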
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

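    # Illustrative sketch (assumption, not in the original source): expand
    # user-level aliases while pinning one locally; ``mine`` is a
    # hypothetical alias name::
    #
    #   revs = repo.anyrevs(['mine'], user=True,
    #                       localalias={'mine': 'draft()'})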
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

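    # Illustrative sketch (assumption): an extension with a registered
    # ``myext-sync`` hook (hypothetical name) could fire it like::
    #
    #   repo.hook('myext-sync', throw=False, node=hex(newnode))
    #
    # With throw=True a failing hook raises instead of returning a status.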
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

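    # Illustrative sketch (not part of the original source): the tag mapping
    # can be walked in both directions::
    #
    #   node = repo.tags().get('release-1.0')   # hypothetical tag -> node
    #   names = repo.nodetags(node)             # node -> sorted tag names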
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

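    # Illustrative sketch (assumption): with ignoremissing=True an unknown
    # branch yields None instead of raising RepoLookupError::
    #
    #   tip = repo.branchtip('default', ignoremissing=True)
    #   if tip is None:
    #       pass  # branch does not exist in this repository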
    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

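    # Illustrative sketch (assumption): ``known()`` answers membership for a
    # batch of nodes; filtered (hidden) revisions count as unknown::
    #
    #   flags = repo.known([node1, node2])  # hypothetical nodes -> [bool, bool]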
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

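    # Illustrative sketch (assumption): ``flags`` uses manifest flag letters,
    # 'l' for a symlink and 'x' for the executable bit::
    #
    #   repo.wwrite('bin/run.sh', data, 'x')        # executable file
    #   repo.wwrite('current', 'bin/run.sh', 'l')   # symlink to that path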
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

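    # Illustrative sketch (assumption): code that only wants to piggy-back on
    # an already-open transaction can do::
    #
    #   tr = repo.currenttransaction()
    #   if tr is not None:
    #       tr.addfinalize('my-step', mycallback)   # hypothetical callback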
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
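        # A hypothetical tags.changes payload in that format (added here for
        # exposition; nodes shortened), showing one moved tag and one added
        # tag:
        #
        #   -M 0123abcd... release-1.0
        #   +M 4567ef01... release-1.0
        #   +A 89ab2345... release-1.1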
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

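    # Illustrative usage sketch (assumption, not part of the original
    # source): callers take the store lock first, then open and explicitly
    # close the transaction::
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')   # hypothetical name
    #       try:
    #           ...  # mutate the store
    #           tr.close()
    #       finally:
    #           tr.release()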
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

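    # Illustrative sketch (assumption): dryrun=True reports what would be
    # undone and returns 0 without modifying the store::
    #
    #   if repo.rollback(dryrun=True) == 0:
    #       pass  # rollback information exists; a real rollback may proceed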
1504 @unfilteredmethod # Until we get smarter cache management
1486 @unfilteredmethod # Until we get smarter cache management
1505 def _rollback(self, dryrun, force, dsguard):
1487 def _rollback(self, dryrun, force, dsguard):
1506 ui = self.ui
1488 ui = self.ui
1507 try:
1489 try:
1508 args = self.vfs.read('undo.desc').splitlines()
1490 args = self.vfs.read('undo.desc').splitlines()
1509 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1491 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1510 if len(args) >= 3:
1492 if len(args) >= 3:
1511 detail = args[2]
1493 detail = args[2]
1512 oldtip = oldlen - 1
1494 oldtip = oldlen - 1
1513
1495
1514 if detail and ui.verbose:
1496 if detail and ui.verbose:
1515 msg = (_('repository tip rolled back to revision %d'
1497 msg = (_('repository tip rolled back to revision %d'
1516 ' (undo %s: %s)\n')
1498 ' (undo %s: %s)\n')
1517 % (oldtip, desc, detail))
1499 % (oldtip, desc, detail))
1518 else:
1500 else:
1519 msg = (_('repository tip rolled back to revision %d'
1501 msg = (_('repository tip rolled back to revision %d'
1520 ' (undo %s)\n')
1502 ' (undo %s)\n')
1521 % (oldtip, desc))
1503 % (oldtip, desc))
1522 except IOError:
1504 except IOError:
1523 msg = _('rolling back unknown transaction\n')
1505 msg = _('rolling back unknown transaction\n')
1524 desc = None
1506 desc = None
1525
1507
1526 if not force and self['.'] != self['tip'] and desc == 'commit':
1508 if not force and self['.'] != self['tip'] and desc == 'commit':
1527 raise error.Abort(
1509 raise error.Abort(
1528 _('rollback of last commit while not checked out '
1510 _('rollback of last commit while not checked out '
1529 'may lose data'), hint=_('use -f to force'))
1511 'may lose data'), hint=_('use -f to force'))
1530
1512
1531 ui.status(msg)
1513 ui.status(msg)
1532 if dryrun:
1514 if dryrun:
1533 return 0
1515 return 0
1534
1516
1535 parents = self.dirstate.parents()
1517 parents = self.dirstate.parents()
1536 self.destroying()
1518 self.destroying()
1537 vfsmap = {'plain': self.vfs, '': self.svfs}
1519 vfsmap = {'plain': self.vfs, '': self.svfs}
1538 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1520 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1539 checkambigfiles=_cachedfiles)
1521 checkambigfiles=_cachedfiles)
1540 if self.vfs.exists('undo.bookmarks'):
1522 if self.vfs.exists('undo.bookmarks'):
1541 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1523 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1542 if self.svfs.exists('undo.phaseroots'):
1524 if self.svfs.exists('undo.phaseroots'):
1543 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1525 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1544 self.invalidate()
1526 self.invalidate()
1545
1527
1546 parentgone = (parents[0] not in self.changelog.nodemap or
1528 parentgone = (parents[0] not in self.changelog.nodemap or
1547 parents[1] not in self.changelog.nodemap)
1529 parents[1] not in self.changelog.nodemap)
1548 if parentgone:
1530 if parentgone:
1549 # prevent dirstateguard from overwriting already restored one
1531 # prevent dirstateguard from overwriting already restored one
1550 dsguard.close()
1532 dsguard.close()
1551
1533
1552 self.dirstate.restorebackup(None, 'undo.dirstate')
1534 self.dirstate.restorebackup(None, 'undo.dirstate')
1553 try:
1535 try:
1554 branch = self.vfs.read('undo.branch')
1536 branch = self.vfs.read('undo.branch')
1555 self.dirstate.setbranch(encoding.tolocal(branch))
1537 self.dirstate.setbranch(encoding.tolocal(branch))
1556 except IOError:
1538 except IOError:
1557 ui.warn(_('named branch could not be reset: '
1539 ui.warn(_('named branch could not be reset: '
1558 'current branch is still \'%s\'\n')
1540 'current branch is still \'%s\'\n')
1559 % self.dirstate.branch())
1541 % self.dirstate.branch())
1560
1542
1561 parents = tuple([p.rev() for p in self[None].parents()])
1543 parents = tuple([p.rev() for p in self[None].parents()])
1562 if len(parents) > 1:
1544 if len(parents) > 1:
1563 ui.status(_('working directory now based on '
1545 ui.status(_('working directory now based on '
1564 'revisions %d and %d\n') % parents)
1546 'revisions %d and %d\n') % parents)
1565 else:
1547 else:
1566 ui.status(_('working directory now based on '
1548 ui.status(_('working directory now based on '
1567 'revision %d\n') % parents)
1549 'revision %d\n') % parents)
1568 mergemod.mergestate.clean(self, self['.'].node())
1550 mergemod.mergestate.clean(self, self['.'].node())
1569
1551
1570 # TODO: if we know which new heads may result from this rollback, pass
1552 # TODO: if we know which new heads may result from this rollback, pass
1571 # them to destroy(), which will prevent the branchhead cache from being
1553 # them to destroy(), which will prevent the branchhead cache from being
1572 # invalidated.
1554 # invalidated.
1573 self.destroyed()
1555 self.destroyed()
1574 return 0
1556 return 0
1575
1557
1576 def _buildcacheupdater(self, newtransaction):
1558 def _buildcacheupdater(self, newtransaction):
1577 """called during transaction to build the callback updating cache
1559 """called during transaction to build the callback updating cache
1578
1560
1579 Lives on the repository to help extension who might want to augment
1561 Lives on the repository to help extension who might want to augment
1580 this logic. For this purpose, the created transaction is passed to the
1562 this logic. For this purpose, the created transaction is passed to the
1581 method.
1563 method.
1582 """
1564 """
1583 # we must avoid cyclic reference between repo and transaction.
1565 # we must avoid cyclic reference between repo and transaction.
1584 reporef = weakref.ref(self)
1566 reporef = weakref.ref(self)
1585 def updater(tr):
1567 def updater(tr):
1586 repo = reporef()
1568 repo = reporef()
1587 repo.updatecaches(tr)
1569 repo.updatecaches(tr)
1588 return updater
1570 return updater
1589
1571
1590 @unfilteredmethod
1572 @unfilteredmethod
1591 def updatecaches(self, tr=None, full=False):
1573 def updatecaches(self, tr=None, full=False):
1592 """warm appropriate caches
1574 """warm appropriate caches
1593
1575
1594 If this function is called after a transaction closed. The transaction
1576 If this function is called after a transaction closed. The transaction
1595 will be available in the 'tr' argument. This can be used to selectively
1577 will be available in the 'tr' argument. This can be used to selectively
1596 update caches relevant to the changes in that transaction.
1578 update caches relevant to the changes in that transaction.
1597
1579
1598 If 'full' is set, make sure all caches the function knows about have
1580 If 'full' is set, make sure all caches the function knows about have
1599 up-to-date data. Even the ones usually loaded more lazily.
1581 up-to-date data. Even the ones usually loaded more lazily.
1600 """
1582 """
1601 if tr is not None and tr.hookargs.get('source') == 'strip':
1583 if tr is not None and tr.hookargs.get('source') == 'strip':
1602 # During strip, many caches are invalid but
1584 # During strip, many caches are invalid but
1603 # later call to `destroyed` will refresh them.
1585 # later call to `destroyed` will refresh them.
1604 return
1586 return
1605
1587
1606 if tr is None or tr.changes['revs']:
1588 if tr is None or tr.changes['revs']:
1607 # updating the unfiltered branchmap should refresh all the others,
1589 # updating the unfiltered branchmap should refresh all the others,
1608 self.ui.debug('updating the branch cache\n')
1590 self.ui.debug('updating the branch cache\n')
1609 branchmap.updatecache(self.filtered('served'))
1591 branchmap.updatecache(self.filtered('served'))
1610
1592
1611 if full:
1593 if full:
1612 rbc = self.revbranchcache()
1594 rbc = self.revbranchcache()
1613 for r in self.changelog:
1595 for r in self.changelog:
1614 rbc.branchinfo(r)
1596 rbc.branchinfo(r)
1615 rbc.write()
1597 rbc.write()
1616
1598
1617 def invalidatecaches(self):
1599 def invalidatecaches(self):
1618
1600
1619 if '_tagscache' in vars(self):
1601 if '_tagscache' in vars(self):
1620 # can't use delattr on proxy
1602 # can't use delattr on proxy
1621 del self.__dict__['_tagscache']
1603 del self.__dict__['_tagscache']
1622
1604
1623 self.unfiltered()._branchcaches.clear()
1605 self.unfiltered()._branchcaches.clear()
1624 self.invalidatevolatilesets()
1606 self.invalidatevolatilesets()
1625 self._sparsesignaturecache.clear()
1607 self._sparsesignaturecache.clear()
1626
1608
1627 def invalidatevolatilesets(self):
1609 def invalidatevolatilesets(self):
1628 self.filteredrevcache.clear()
1610 self.filteredrevcache.clear()
1629 obsolete.clearobscaches(self)
1611 obsolete.clearobscaches(self)
1630
1612
1631 def invalidatedirstate(self):
1613 def invalidatedirstate(self):
1632 '''Invalidates the dirstate, causing the next call to dirstate
1614 '''Invalidates the dirstate, causing the next call to dirstate
1633 to check if it was modified since the last time it was read,
1615 to check if it was modified since the last time it was read,
1634 rereading it if it has.
1616 rereading it if it has.
1635
1617
1636 This is different to dirstate.invalidate() that it doesn't always
1618 This is different to dirstate.invalidate() that it doesn't always
1637 rereads the dirstate. Use dirstate.invalidate() if you want to
1619 rereads the dirstate. Use dirstate.invalidate() if you want to
1638 explicitly read the dirstate again (i.e. restoring it to a previous
1620 explicitly read the dirstate again (i.e. restoring it to a previous
1639 known good state).'''
1621 known good state).'''
1640 if hasunfilteredcache(self, 'dirstate'):
1622 if hasunfilteredcache(self, 'dirstate'):
1641 for k in self.dirstate._filecache:
1623 for k in self.dirstate._filecache:
1642 try:
1624 try:
1643 delattr(self.dirstate, k)
1625 delattr(self.dirstate, k)
1644 except AttributeError:
1626 except AttributeError:
1645 pass
1627 pass
1646 delattr(self.unfiltered(), 'dirstate')
1628 delattr(self.unfiltered(), 'dirstate')
1647
1629
1648 def invalidate(self, clearfilecache=False):
1630 def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
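
    # Illustrative sketch (not part of the original source): an extension
    # could use _afterlock() to defer work until the repository is fully
    # unlocked, e.g.:
    #
    #     def _notify():
    #         repo.ui.status(b'all locks released\n')
    #     repo._afterlock(_notify)
    #
    # ``_notify`` is a hypothetical callback; per the docstring above, it
    # runs immediately if no lock is currently held.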

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
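
    # Illustrative sketch (not part of the original source): the documented
    # ordering acquires wlock before lock, for example:
    #
    #     with repo.wlock():
    #         with repo.lock():
    #             # safe to modify both store and non-store parts here
    #             ...
    #
    # Acquiring them in the opposite order triggers the develwarn above when
    # devel.check-locks is enabled.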

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
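
    # Illustrative sketch (not part of the original source): when copy
    # information is recorded above, the filelog revision carries metadata
    # of roughly this shape:
    #
    #     meta = {"copy": "foo", "copyrev": "<40-hex node of foo's source>"}
    #
    # and fparent1 is set to nullid so readers know to consult the copy data.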

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
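
    # Illustrative sketch (not part of the original source): a minimal
    # programmatic commit might look like
    #
    #     node = repo.commit(text=b'example message',
    #                        user=b'Alice <alice@example.com>')
    #
    # which returns the new changelog node, or None when there is nothing
    # to commit and ui.allowemptycommit is not set.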

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
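
    # Illustrative sketch (not part of the original source): an extension
    # could register a post-dirstate-status callback like
    #
    #     def fixup(wctx, status):
    #         wctx.repo().ui.debug(b'status fixups ran\n')
    #     repo.addpostdsstatus(fixup)
    #
    # ``fixup`` is a hypothetical name; it runs with the wlock held, per the
    # contract described above.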

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
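
    # Illustrative sketch (not part of the original source): listing the
    # open heads of the default branch, newest first:
    #
    #     for node in repo.branchheads(b'default'):
    #         repo.ui.write(b'%s\n' % hex(node))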

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
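
    # Illustrative note (not part of the original source): between() walks
    # first parents from ``top`` toward ``bottom`` and keeps only the nodes
    # at exponentially growing distances (1, 2, 4, 8, ...), since ``f``
    # doubles each time a node is recorded. For a linear range ten deep it
    # would return the nodes at distances 1, 2, 4 and 8 from ``top``; the
    # legacy discovery protocol uses this sparse sample to bisect the gap.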

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks whose hooks are called with a pushop (carrying
        repo, remote and outgoing attributes) before pushing changesets.
        """
        return util.hooks()
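
    # Illustrative sketch (not part of the original source): an extension
    # can register a pre-push check roughly like
    #
    #     def check(pushop):
    #         if not pushop.outgoing.missing:
    #             pushop.repo.ui.note(b'nothing to push\n')
    #     repo.prepushoutgoinghooks.add('myext', check)
    #
    # ``myext`` and ``check`` are hypothetical names.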

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
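
    # Illustrative sketch (not part of the original source): moving a
    # bookmark through the pushkey mechanism might look like
    #
    #     ok = repo.pushkey('bookmarks', 'mybook', '', hex(newnode))
    #
    # where 'mybook' and ``newnode`` are hypothetical; the return value
    # signals whether the update was accepted.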

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
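
# Illustrative note (not part of the original source): with stock defaults
# (usestore, usefncache, dotencode and generaldelta all enabled, zlib
# compression), the returned set is typically
#
#     {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}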
@@ -1,1019 +1,996 b''
# repository.py - Interfaces and base classes for repositories and peers.
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .thirdparty.zope import (
    interface as zi,
)
from . import (
    error,
)

class ipeerconnection(zi.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    ui = zi.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """

class ipeercapabilities(zi.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """

class ipeercommands(zi.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """
166
166
167 class ipeerlegacycommands(zi.Interface):
167 class ipeerlegacycommands(zi.Interface):
168 """Interface for implementing support for legacy wire protocol commands.
168 """Interface for implementing support for legacy wire protocol commands.
169
169
170 Wire protocol commands transition to legacy status when they are no longer
170 Wire protocol commands transition to legacy status when they are no longer
171 used by modern clients. To facilitate identifying which commands are
171 used by modern clients. To facilitate identifying which commands are
172 legacy, the interfaces are split.
172 legacy, the interfaces are split.
173 """
173 """
174
174
175 def between(pairs):
175 def between(pairs):
176 """Obtain nodes between pairs of nodes.
176 """Obtain nodes between pairs of nodes.
177
177
178 ``pairs`` is an iterable of node pairs.
178 ``pairs`` is an iterable of node pairs.
179
179
180 Returns an iterable of iterables of nodes corresponding to each
180 Returns an iterable of iterables of nodes corresponding to each
181 requested pair.
181 requested pair.
182 """
182 """
183
183
184 def branches(nodes):
184 def branches(nodes):
185 """Obtain ancestor changesets of specific nodes back to a branch point.
185 """Obtain ancestor changesets of specific nodes back to a branch point.
186
186
187 For each requested node, the peer finds the first ancestor node that is
187 For each requested node, the peer finds the first ancestor node that is
188 a DAG root or is a merge.
188 a DAG root or is a merge.
189
189
190 Returns an iterable of iterables with the resolved values for each node.
190 Returns an iterable of iterables with the resolved values for each node.
191 """
191 """
192
192
193 def changegroup(nodes, kind):
193 def changegroup(nodes, kind):
194 """Obtain a changegroup with data for descendants of specified nodes."""
194 """Obtain a changegroup with data for descendants of specified nodes."""
195
195
196 def changegroupsubset(bases, heads, kind):
196 def changegroupsubset(bases, heads, kind):
197 pass
197 pass
198
198
199 class ipeercommandexecutor(zi.Interface):
199 class ipeercommandexecutor(zi.Interface):
200 """Represents a mechanism to execute remote commands.
200 """Represents a mechanism to execute remote commands.
201
201
202 This is the primary interface for requesting that wire protocol commands
202 This is the primary interface for requesting that wire protocol commands
203 be executed. Instances of this interface are active in a context manager
203 be executed. Instances of this interface are active in a context manager
204 and have a well-defined lifetime. When the context manager exits, all
204 and have a well-defined lifetime. When the context manager exits, all
205 outstanding requests are waited on.
205 outstanding requests are waited on.
206 """
206 """
207
207
208 def callcommand(name, args):
208 def callcommand(name, args):
209 """Request that a named command be executed.
209 """Request that a named command be executed.
210
210
211 Receives the command name and a dictionary of command arguments.
211 Receives the command name and a dictionary of command arguments.
212
212
213 Returns a ``concurrent.futures.Future`` that will resolve to the
213 Returns a ``concurrent.futures.Future`` that will resolve to the
214 result of that command request. That exact value is left up to
214 result of that command request. That exact value is left up to
215 the implementation and possibly varies by command.
215 the implementation and possibly varies by command.
216
216
217 Not all commands can coexist with other commands in an executor
217 Not all commands can coexist with other commands in an executor
218 instance: it depends on the underlying wire protocol transport being
218 instance: it depends on the underlying wire protocol transport being
219 used and the command itself.
219 used and the command itself.
220
220
221 Implementations MAY call ``sendcommands()`` automatically if the
221 Implementations MAY call ``sendcommands()`` automatically if the
222 requested command can not coexist with other commands in this executor.
222 requested command can not coexist with other commands in this executor.
223
223
224 Implementations MAY call ``sendcommands()`` automatically when the
224 Implementations MAY call ``sendcommands()`` automatically when the
225 future's ``result()`` is called. So, consumers using multiple
225 future's ``result()`` is called. So, consumers using multiple
226 commands with an executor MUST ensure that ``result()`` is not called
226 commands with an executor MUST ensure that ``result()`` is not called
227 until all command requests have been issued.
227 until all command requests have been issued.
228 """
228 """
229
229
230 def sendcommands():
230 def sendcommands():
231 """Trigger submission of queued command requests.
231 """Trigger submission of queued command requests.
232
232
233 Not all transports submit commands as soon as they are requested to
233 Not all transports submit commands as soon as they are requested to
234 run. When called, this method forces queued command requests to be
234 run. When called, this method forces queued command requests to be
235 issued. It will no-op if all commands have already been sent.
235 issued. It will no-op if all commands have already been sent.
236
236
237 When called, no more new commands may be issued with this executor.
237 When called, no more new commands may be issued with this executor.
238 """
238 """
239
239
240 def close():
240 def close():
241 """Signal that this command request is finished.
241 """Signal that this command request is finished.
242
242
243 When called, no more new commands may be issued. All outstanding
243 When called, no more new commands may be issued. All outstanding
244 commands that have previously been issued are waited on before
244 commands that have previously been issued are waited on before
245 returning. This not only includes waiting for the futures to resolve,
245 returning. This not only includes waiting for the futures to resolve,
246 but also waiting for all response data to arrive. In other words,
246 but also waiting for all response data to arrive. In other words,
247 calling this waits for all on-wire state for issued command requests
247 calling this waits for all on-wire state for issued command requests
248 to finish.
248 to finish.
249
249
250 When used as a context manager, this method is called when exiting the
250 When used as a context manager, this method is called when exiting the
251 context manager.
251 context manager.
252
252
253 This method may call ``sendcommands()`` if there are buffered commands.
253 This method may call ``sendcommands()`` if there are buffered commands.
254 """
254 """
255
255
256 class ipeerrequests(zi.Interface):
256 class ipeerrequests(zi.Interface):
257 """Interface for executing commands on a peer."""
257 """Interface for executing commands on a peer."""
258
258
259 def commandexecutor():
259 def commandexecutor():
260 """A context manager that resolves to an ipeercommandexecutor.
260 """A context manager that resolves to an ipeercommandexecutor.
261
261
262 The object this resolves to can be used to issue command requests
262 The object this resolves to can be used to issue command requests
263 to the peer.
263 to the peer.
264
264
265 Callers should call its ``callcommand`` method to issue command
265 Callers should call its ``callcommand`` method to issue command
266 requests.
266 requests.
267
267
268 A new executor should be obtained for each distinct set of commands
268 A new executor should be obtained for each distinct set of commands
269 (possibly just a single command) that the consumer wants to execute
269 (possibly just a single command) that the consumer wants to execute
270 as part of a single operation or round trip. This is because some
270 as part of a single operation or round trip. This is because some
271 peers are half-duplex and/or don't support persistent connections.
271 peers are half-duplex and/or don't support persistent connections.
272 e.g. in the case of HTTP peers, commands sent to an executor represent
272 e.g. in the case of HTTP peers, commands sent to an executor represent
273 a single HTTP request. While some peers may support multiple command
273 a single HTTP request. While some peers may support multiple command
274 sends over the wire per executor, consumers need to code to the least
274 sends over the wire per executor, consumers need to code to the least
275 capable peer. So it should be assumed that command executors buffer
275 capable peer. So it should be assumed that command executors buffer
276 called commands until they are told to send them and that each
276 called commands until they are told to send them and that each
277 command executor could result in a new connection or wire-level request
277 command executor could result in a new connection or wire-level request
278 being issued.
278 being issued.
279 """
279 """

class ipeerbase(ipeerconnection, ipeercapabilities, ipeercommands,
                ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """
-    def iterbatch():
-        """Obtain an object to be used for multiple method calls.
-
-        Various operations call several methods on peer instances. If each
-        method call were performed immediately and serially, this would
-        require round trips to remote peers and/or would slow down execution.
-
-        Some peers have the ability to "batch" method calls to avoid costly
-        round trips or to facilitate concurrent execution.
-
-        This method returns an object that can be used to indicate intent to
-        perform batched method calls.
-
-        The returned object is a proxy of this peer. It intercepts calls to
-        batchable methods and queues them instead of performing them
-        immediately. This proxy object has a ``submit`` method that will
-        perform all queued batchable method calls. A ``results()`` method
-        exposes the results of queued/batched method calls. It is a generator
-        of results in the order they were called.
-
-        Not all peers or wire protocol implementations may actually batch method
-        calls. However, they must all support this API.
-        """

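# For contrast, the batching pattern that ``iterbatch()`` provided is
# roughly the following (a sketch of the old API per the docstring above):
#
#   b = peer.iterbatch()
#   b.heads()
#   b.known(nodes)
#   b.submit()
#   headsres, knownres = b.results()
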
class ipeerbaselegacycommands(ipeerbase, ipeerlegacycommands):
    """Unified peer interface that supports legacy commands."""

@zi.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = '%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name):]

        return False

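    # Illustrative examples against the logic above, assuming a peer that
    # advertises the (hypothetical) capabilities {'lookup', 'bundle2=HG20'}:
    #
    #   peer.capable('lookup')   -> True
    #   peer.capable('bundle2')  -> 'HG20'
    #   peer.capable('unknown')  -> False
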
    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _('cannot %s; remote repository does not support the %r '
              'capability') % (purpose, name))

@zi.implementer(ipeerbaselegacycommands)
class legacypeer(peer):
    """peer but with support for legacy wire protocol commands."""

class ifilerevisionssequence(zi.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
            Contains the offset and flags for the revision. 64-bit unsigned
            integer where first 6 bytes are the offset and the next 2 bytes
            are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of the revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of the changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 2nd parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""

class ifileindex(zi.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """
    index = zi.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def flags(rev):
        """Obtain flags used to affect storage of a revision."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def headrevs():
        """Obtain a list of revision numbers that are DAG heads.

        The list is sorted oldest to newest.

        TODO determine if sorting is required.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

    def deltaparent(rev):
        """Return the revision that is a suitable parent to delta against."""

    def candelta(baserev, rev):
        """Whether a delta can be generated between two revisions."""

class ifiledata(zi.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def rawsize(rev):
        """The size of the fulltext data for a revision as stored."""

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements. Use ``rawsize()`` if
        metadata size is important.
        """

    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
        """Validate the stored hash of a given fulltext and node.

        Raises ``error.RevlogError`` if hash validation fails.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        Operates on raw data in the store (``revision(node, raw=True)``).

        The returned data is the result of ``bdiff.bdiff`` on the raw
        revision data.
        """

class ifilemutation(zi.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """

class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    version = zi.Attribute(
        """Version number of storage.

        TODO this feels revlog centric and could likely be removed.
        """)

    storedeltachains = zi.Attribute(
        """Whether the store stores deltas.

        TODO deltachains are revlog centric. This can probably be removed
        once there are better abstractions for obtaining/writing
        data.
        """)

    _generaldelta = zi.Attribute(
        """Whether deltas can be against any parent revision.

        TODO this is used by changegroup code and it could probably be
        folded into another API.
        """)

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

class completelocalrepository(zi.Interface):
    """Monolithic interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = zi.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """)

    openerreqs = zi.Attribute(
        """Set of requirements that are passed to the opener.

        This is actually a class attribute and is shared among all instances.
        """)

    supported = zi.Attribute(
        """Set of requirements that this repo is capable of opening.""")

    requirements = zi.Attribute(
        """Set of requirements this repo uses.""")

    filtername = zi.Attribute(
        """Name of the repoview that is active on this repo.""")

    wvfs = zi.Attribute(
        """VFS used to access the working directory.""")

    vfs = zi.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """)

    svfs = zi.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """)

    root = zi.Attribute(
        """Path to the root of the working directory.""")

    path = zi.Attribute(
        """Path to the .hg directory.""")

    origroot = zi.Attribute(
        """The filesystem path that was used to construct the repo.""")

    auditor = zi.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """)

    nofsauditor = zi.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """)

    baseui = zi.Attribute(
        """Original ui instance passed into constructor.""")

    ui = zi.Attribute(
        """Main ui instance for this instance.""")

    sharedpath = zi.Attribute(
        """Path to the .hg directory of the repo this repo was shared from.""")

    store = zi.Attribute(
        """A store instance.""")

    spath = zi.Attribute(
        """Path to the store.""")

    sjoin = zi.Attribute(
        """Alias to self.store.join.""")

    cachevfs = zi.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """)

    filteredrevcache = zi.Attribute(
        """Holds sets of revisions to be filtered.""")

    names = zi.Attribute(
        """A ``namespaces`` instance.""")

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = zi.Attribute(
        """A store of obsolescence data.""")

    changelog = zi.Attribute(
        """A handle on the changelog revlog.""")

    manifestlog = zi.Attribute(
        """A handle on the root manifest revlog.""")

    dirstate = zi.Attribute(
        """Working directory state.""")

    narrowpats = zi.Attribute(
        """Matcher patterns for this repository's narrowspec.""")

    def narrowmatch():
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def file(f):
        """Obtain a filelog for a tracked path."""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, vdirs, match, status, fail):
        pass

    def commit(text='', user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(node1='.', node2=None, match=None, ignored=False,
               clean=False, unknown=False, listsubrepos=False):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = zi.Attribute(
        """util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass
@@ -1,708 +1,614 b''
# wireprotov1peer.py - Client-side functionality for wire protocol version 1.
#
# Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib
import sys
import weakref

from .i18n import _
from .node import (
    bin,
)
from .thirdparty.zope import (
    interface as zi,
)
from . import (
    bundle2,
    changegroup as changegroupmod,
    encoding,
    error,
    pushkey as pushkeymod,
    pycompat,
    repository,
    util,
    wireprototypes,
)

urlreq = util.urlreq

def batchable(f):
    '''annotation for batchable methods

    Such methods must implement a coroutine as follows:

    @batchable
    def sample(self, one, two=None):
        # Build list of encoded arguments suitable for your wire protocol:
        encargs = [('one', encode(one),), ('two', encode(two),)]
        # Create future for injection of encoded result:
        encresref = future()
        # Return encoded arguments and future:
        yield encargs, encresref
        # Assuming the future to be filled with the result from the batched
        # request now. Decode it:
        yield decode(encresref.value)

    The decorator returns a function which wraps this coroutine as a plain
    method, but adds the original method as an attribute called "batchable",
    which is used by remotebatch to split the call into separate encoding and
    decoding phases.
    '''
    def plain(*args, **opts):
        batchable = f(*args, **opts)
        encargsorres, encresref = next(batchable)
        if not encresref:
            return encargsorres  # a local result in this case
        self = args[0]
        cmd = pycompat.bytesurl(f.__name__)  # ensure cmd is ascii bytestr
        encresref.set(self._submitone(cmd, encargsorres))
        return next(batchable)
    setattr(plain, 'batchable', f)
    return plain

class future(object):
    '''placeholder for a value to be set later'''
    def set(self, value):
        if util.safehasattr(self, 'value'):
            raise error.RepoError("future is already set")
        self.value = value

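# Illustrative behavior of ``future`` (a sketch, not a test):
#
#   f = future()
#   f.set('res')    # ok; f.value is now 'res'
#   f.set('again')  # raises error.RepoError("future is already set")
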
-class batcher(object):
-    '''base class for batches of commands submittable in a single request
-
-    All methods invoked on instances of this class are simply queued and
-    return a future for the result. Once you call submit(), all the queued
-    calls are performed and the results set in their respective futures.
-    '''
-    def __init__(self):
-        self.calls = []
-    def __getattr__(self, name):
-        def call(*args, **opts):
-            resref = future()
-            # Please don't invent non-ascii method names, or you will
-            # give core hg a very sad time.
-            self.calls.append((name.encode('ascii'), args, opts, resref,))
-            return resref
-        return call
-    def submit(self):
-        raise NotImplementedError()
-
-class iterbatcher(batcher):
-
-    def submit(self):
-        raise NotImplementedError()
-
-    def results(self):
-        raise NotImplementedError()
-
-class remoteiterbatcher(iterbatcher):
-    def __init__(self, remote):
-        super(remoteiterbatcher, self).__init__()
-        self._remote = remote
-
-    def __getattr__(self, name):
-        # Validate this method is batchable, since submit() only supports
-        # batchable methods.
-        fn = getattr(self._remote, name)
-        if not getattr(fn, 'batchable', None):
-            raise error.ProgrammingError('Attempted to batch a non-batchable '
-                                         'call to %r' % name)
-
-        return super(remoteiterbatcher, self).__getattr__(name)
-
-    def submit(self):
-        """Break the batch request into many batch calls and pipeline them.
-
-        This is mostly valuable over http where request sizes can be
-        limited, but can be used in other places as well.
-        """
-        # 2-tuple of (command, arguments) that represents what will be
-        # sent over the wire.
-        requests = []
-
-        # 4-tuple of (command, final future, @batchable generator, remote
-        # future).
-        results = []
-
-        for command, args, opts, finalfuture in self.calls:
-            mtd = getattr(self._remote, command)
-            batchable = mtd.batchable(mtd.__self__, *args, **opts)
-
-            commandargs, fremote = next(batchable)
-            assert fremote
-            requests.append((command, commandargs))
-            results.append((command, finalfuture, batchable, fremote))
-
-        if requests:
-            self._resultiter = self._remote._submitbatch(requests)
-
-        self._results = results
-
-    def results(self):
-        for command, finalfuture, batchable, remotefuture in self._results:
-            # Get the raw result, set it in the remote future, feed it
-            # back into the @batchable generator so it can be decoded, and
-            # set the result on the final future to this value.
-            remoteresult = next(self._resultiter)
-            remotefuture.set(remoteresult)
-            finalfuture.set(next(batchable))
-
-            # Verify our @batchable generators only emit 2 values.
-            try:
-                next(batchable)
-            except StopIteration:
-                pass
-            else:
-                raise error.ProgrammingError('%s @batchable generator emitted '
-                                             'unexpected value count' % command)
-
-            yield finalfuture.value
-
 def encodebatchcmds(req):
     """Return a ``cmds`` argument value for the ``batch`` command."""
     escapearg = wireprototypes.escapebatcharg

     cmds = []
     for op, argsdict in req:
         # Old servers didn't properly unescape argument names. So prevent
         # the sending of argument names that may not be decoded properly by
         # servers.
         assert all(escapearg(k) == k for k in argsdict)

         args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
                         for k, v in argsdict.iteritems())
         cmds.append('%s %s' % (op, args))

     return ';'.join(cmds)

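For illustration, a sketch of what encodebatchcmds() produces for a two-command request (assuming neither argument needs escaping):

    req = [('heads', {}),
           ('known', {'nodes': '1111 2222'})]
    # encodebatchcmds(req) == 'heads ;known nodes=1111 2222'
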
 class unsentfuture(pycompat.futures.Future):
     """A Future variation to represent an unsent command.

     Because we buffer commands and don't submit them immediately, calling
     ``result()`` on an unsent future could deadlock. Futures for buffered
     commands are represented by this type, which wraps ``result()`` to
     call ``sendcommands()``.
     """

     def result(self, timeout=None):
         if self.done():
             return pycompat.futures.Future.result(self, timeout)

         self._peerexecutor.sendcommands()

         # This looks like it will infinitely recurse. However,
         # sendcommands() should modify __class__. This call serves as a check
         # on that.
         return self.result(timeout)

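The __class__ swap that the comment above relies on can be sketched in isolation (standard concurrent.futures only; the names here are illustrative, not from this change):

    import concurrent.futures as futures

    class unsent(futures.Future):
        def result(self, timeout=None):
            # Stand-in for sendcommands(): demote to a plain Future and
            # resolve it, then re-dispatch to Future.result().
            self.__class__ = futures.Future
            self.set_result(42)
            return self.result(timeout)

    f = futures.Future()
    f.__class__ = unsent
    print(f.result())   # 42, with no infinite recursion
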
 @zi.implementer(repository.ipeercommandexecutor)
 class peerexecutor(object):
     def __init__(self, peer):
         self._peer = peer
         self._sent = False
         self._closed = False
         self._calls = []
         self._futures = weakref.WeakSet()
         self._responseexecutor = None
         self._responsef = None

     def __enter__(self):
         return self

     def __exit__(self, exctype, excvalee, exctb):
         self.close()

     def callcommand(self, command, args):
         if self._sent:
             raise error.ProgrammingError('callcommand() cannot be used '
                                          'after commands are sent')

         if self._closed:
             raise error.ProgrammingError('callcommand() cannot be used '
                                          'after close()')

         # Commands are dispatched through methods on the peer.
         fn = getattr(self._peer, pycompat.sysstr(command), None)

         if not fn:
             raise error.ProgrammingError(
                 'cannot call command %s: method of same name not available '
                 'on peer' % command)

         # Commands are either batchable or they aren't. If a command
         # isn't batchable, we send it immediately because the executor
         # can no longer accept new commands after a non-batchable command.
         # If a command is batchable, we queue it for later. But we have
         # to account for the case of a non-batchable command arriving after
         # a batchable one and refuse to service it.

         def addcall():
             f = pycompat.futures.Future()
             self._futures.add(f)
             self._calls.append((command, args, fn, f))
             return f

         if getattr(fn, 'batchable', False):
             f = addcall()

             # But since we don't issue it immediately, we wrap its result()
             # to trigger sending so we avoid deadlocks.
             f.__class__ = unsentfuture
             f._peerexecutor = self
         else:
             if self._calls:
                 raise error.ProgrammingError(
                     '%s is not batchable and cannot be called on a command '
                     'executor along with other commands' % command)

             f = addcall()

             # Non-batchable commands can never coexist with another command
             # in this executor. So send the command immediately.
             self.sendcommands()

         return f

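Caller-side, the executor is reached through the commandexecutor() context manager on the peer (a condensed sketch assuming a connected `peer` and a `nodes` list, mirroring the updated tests further down in this change):

    with peer.commandexecutor() as e:
        fheads = e.callcommand('heads', {})
        fknown = e.callcommand('known', {'nodes': nodes})

    # close() has called sendcommands(), so the futures are resolved.
    heads = fheads.result()
    known = fknown.result()
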
     def sendcommands(self):
         if self._sent:
             return

         if not self._calls:
             return

         self._sent = True

         # Unhack any future types so caller sees a clean type and to break
         # cycle between us and futures.
         for f in self._futures:
             if isinstance(f, unsentfuture):
                 f.__class__ = pycompat.futures.Future
                 f._peerexecutor = None

         calls = self._calls
         # Mainly to destroy references to futures.
         self._calls = None

         # Simple case of a single command. We call it synchronously.
         if len(calls) == 1:
             command, args, fn, f = calls[0]

             # Future was cancelled. Ignore it.
             if not f.set_running_or_notify_cancel():
                 return

             try:
                 result = fn(**pycompat.strkwargs(args))
             except Exception:
                 f.set_exception_info(*sys.exc_info()[1:])
             else:
                 f.set_result(result)

             return

         # Batch commands are a bit harder. First, we have to deal with the
         # @batchable coroutine. That's a bit annoying. Furthermore, we also
         # need to preserve streaming. i.e. it should be possible for the
         # futures to resolve as data is coming in off the wire without having
         # to wait for the final byte of the final response. We do this by
         # spinning up a thread to read the responses.

         requests = []
         states = []

         for command, args, fn, f in calls:
             # Future was cancelled. Ignore it.
             if not f.set_running_or_notify_cancel():
                 continue

             try:
                 batchable = fn.batchable(fn.__self__,
                                          **pycompat.strkwargs(args))
             except Exception:
                 f.set_exception_info(*sys.exc_info()[1:])
                 return

             # Encoded arguments and future holding remote result.
             try:
                 encodedargs, fremote = next(batchable)
             except Exception:
                 f.set_exception_info(*sys.exc_info()[1:])
                 return

             requests.append((command, encodedargs))
             states.append((command, f, batchable, fremote))

         if not requests:
             return

         # This will emit responses in the order they were executed.
         wireresults = self._peer._submitbatch(requests)

         # The use of a thread pool executor here is a bit weird for something
         # that only spins up a single thread. However, thread management is
         # hard and it is easy to encounter race conditions, deadlocks, etc.
         # concurrent.futures already solves these problems and its thread pool
         # executor has minimal overhead. So we use it.
         self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
         self._responsef = self._responseexecutor.submit(self._readbatchresponse,
                                                         states, wireresults)

     def close(self):
         self.sendcommands()

         if self._closed:
             return

         self._closed = True

         if not self._responsef:
             return

         # We need to wait on our in-flight response and then shut down the
         # executor once we have a result.
         try:
             self._responsef.result()
         finally:
             self._responseexecutor.shutdown(wait=True)
             self._responsef = None
             self._responseexecutor = None

             # If any of our futures are still in progress, mark them as
             # errored. Otherwise a result() could wait indefinitely.
             for f in self._futures:
                 if not f.done():
                     f.set_exception(error.ResponseError(
                         _('unfulfilled batch command response')))

             self._futures = None

     def _readbatchresponse(self, states, wireresults):
         # Executes in a thread to read data off the wire.

         for command, f, batchable, fremote in states:
             # Grab raw result off the wire and teach the internal future
             # about it.
             remoteresult = next(wireresults)
             fremote.set(remoteresult)

             # And ask the coroutine to decode that value.
             try:
                 result = next(batchable)
             except Exception:
                 f.set_exception_info(*sys.exc_info()[1:])
             else:
                 f.set_result(result)

 class wirepeer(repository.legacypeer):
     """Client-side interface for communicating with a peer repository.

     Methods commonly call wire protocol commands of the same name.

     See also httppeer.py and sshpeer.py for protocol-specific
     implementations of this interface.
     """
     def commandexecutor(self):
         return peerexecutor(self)

     # Begin of ipeercommands interface.

-    def iterbatch(self):
-        return remoteiterbatcher(self)
-
     @batchable
     def lookup(self, key):
         self.requirecap('lookup', _('look up remote revision'))
         f = future()
         yield {'key': encoding.fromlocal(key)}, f
         d = f.value
         success, data = d[:-1].split(" ", 1)
         if int(success):
             yield bin(data)
         else:
             self._abort(error.RepoError(data))

     @batchable
     def heads(self):
         f = future()
         yield {}, f
         d = f.value
         try:
             yield wireprototypes.decodelist(d[:-1])
         except ValueError:
             self._abort(error.ResponseError(_("unexpected response:"), d))

     @batchable
     def known(self, nodes):
         f = future()
         yield {'nodes': wireprototypes.encodelist(nodes)}, f
         d = f.value
         try:
             yield [bool(int(b)) for b in d]
         except ValueError:
             self._abort(error.ResponseError(_("unexpected response:"), d))

     @batchable
     def branchmap(self):
         f = future()
         yield {}, f
         d = f.value
         try:
             branchmap = {}
             for branchpart in d.splitlines():
                 branchname, branchheads = branchpart.split(' ', 1)
                 branchname = encoding.tolocal(urlreq.unquote(branchname))
                 branchheads = wireprototypes.decodelist(branchheads)
                 branchmap[branchname] = branchheads
             yield branchmap
         except TypeError:
             self._abort(error.ResponseError(_("unexpected response:"), d))

     @batchable
     def listkeys(self, namespace):
         if not self.capable('pushkey'):
             yield {}, None
         f = future()
         self.ui.debug('preparing listkeys for "%s"\n' % namespace)
         yield {'namespace': encoding.fromlocal(namespace)}, f
         d = f.value
         self.ui.debug('received listkey for "%s": %i bytes\n'
                       % (namespace, len(d)))
         yield pushkeymod.decodekeys(d)

     @batchable
     def pushkey(self, namespace, key, old, new):
         if not self.capable('pushkey'):
             yield False, None
         f = future()
         self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
         yield {'namespace': encoding.fromlocal(namespace),
                'key': encoding.fromlocal(key),
                'old': encoding.fromlocal(old),
                'new': encoding.fromlocal(new)}, f
         d = f.value
         d, output = d.split('\n', 1)
         try:
             d = bool(int(d))
         except ValueError:
             raise error.ResponseError(
                 _('push failed (unexpected response):'), d)
         for l in output.splitlines(True):
             self.ui.status(_('remote: '), l)
         yield d

     def stream_out(self):
         return self._callstream('stream_out')

     def getbundle(self, source, **kwargs):
         kwargs = pycompat.byteskwargs(kwargs)
         self.requirecap('getbundle', _('look up remote changes'))
         opts = {}
         bundlecaps = kwargs.get('bundlecaps') or set()
         for key, value in kwargs.iteritems():
             if value is None:
                 continue
             keytype = wireprototypes.GETBUNDLE_ARGUMENTS.get(key)
             if keytype is None:
                 raise error.ProgrammingError(
                     'Unexpectedly None keytype for key %s' % key)
             elif keytype == 'nodes':
                 value = wireprototypes.encodelist(value)
             elif keytype == 'csv':
                 value = ','.join(value)
             elif keytype == 'scsv':
                 value = ','.join(sorted(value))
             elif keytype == 'boolean':
                 value = '%i' % bool(value)
             elif keytype != 'plain':
                 raise KeyError('unknown getbundle option type %s'
                                % keytype)
             opts[key] = value
         f = self._callcompressable("getbundle", **pycompat.strkwargs(opts))
         if any((cap.startswith('HG2') for cap in bundlecaps)):
             return bundle2.getunbundler(self.ui, f)
         else:
             return changegroupmod.cg1unpacker(f, 'UN')

     def unbundle(self, cg, heads, url):
         '''Send cg (a readable file-like object representing the
         changegroup to push, typically a chunkbuffer object) to the
         remote server as a bundle.

         When pushing a bundle10 stream, return an integer indicating the
         result of the push (see changegroup.apply()).

         When pushing a bundle20 stream, return a bundle20 stream.

         `url` is the url the client thinks it's pushing to, which is
         visible to hooks.
         '''

         if heads != ['force'] and self.capable('unbundlehash'):
             heads = wireprototypes.encodelist(
                 ['hashed', hashlib.sha1(''.join(sorted(heads))).digest()])
         else:
             heads = wireprototypes.encodelist(heads)

         if util.safehasattr(cg, 'deltaheader'):
             # this is a bundle10, do the old style call sequence
             ret, output = self._callpush("unbundle", cg, heads=heads)
             if ret == "":
                 raise error.ResponseError(
                     _('push failed:'), output)
             try:
                 ret = int(ret)
             except ValueError:
                 raise error.ResponseError(
                     _('push failed (unexpected response):'), ret)

             for l in output.splitlines(True):
                 self.ui.status(_('remote: '), l)
         else:
             # bundle2 push. Send a stream, fetch a stream.
             stream = self._calltwowaystream('unbundle', cg, heads=heads)
             ret = bundle2.getunbundler(self.ui, stream)
         return ret

     # End of ipeercommands interface.

     # Begin of ipeerlegacycommands interface.

     def branches(self, nodes):
         n = wireprototypes.encodelist(nodes)
         d = self._call("branches", nodes=n)
         try:
             br = [tuple(wireprototypes.decodelist(b)) for b in d.splitlines()]
             return br
         except ValueError:
             self._abort(error.ResponseError(_("unexpected response:"), d))

     def between(self, pairs):
         batch = 8 # avoid giant requests
         r = []
         for i in xrange(0, len(pairs), batch):
             n = " ".join([wireprototypes.encodelist(p, '-')
                           for p in pairs[i:i + batch]])
             d = self._call("between", pairs=n)
             try:
                 r.extend(l and wireprototypes.decodelist(l) or []
                          for l in d.splitlines())
             except ValueError:
                 self._abort(error.ResponseError(_("unexpected response:"), d))
         return r

     def changegroup(self, nodes, kind):
         n = wireprototypes.encodelist(nodes)
         f = self._callcompressable("changegroup", roots=n)
         return changegroupmod.cg1unpacker(f, 'UN')

     def changegroupsubset(self, bases, heads, kind):
         self.requirecap('changegroupsubset', _('look up remote changes'))
         bases = wireprototypes.encodelist(bases)
         heads = wireprototypes.encodelist(heads)
         f = self._callcompressable("changegroupsubset",
                                    bases=bases, heads=heads)
         return changegroupmod.cg1unpacker(f, 'UN')

     # End of ipeerlegacycommands interface.

     def _submitbatch(self, req):
         """run batch request <req> on the server

         Returns an iterator of the raw responses from the server.
         """
         ui = self.ui
         if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
             ui.debug('devel-peer-request: batched-content\n')
             for op, args in req:
                 msg = 'devel-peer-request: - %s (%d arguments)\n'
                 ui.debug(msg % (op, len(args)))

         unescapearg = wireprototypes.unescapebatcharg

         rsp = self._callstream("batch", cmds=encodebatchcmds(req))
         chunk = rsp.read(1024)
         work = [chunk]
         while chunk:
             while ';' not in chunk and chunk:
                 chunk = rsp.read(1024)
                 work.append(chunk)
             merged = ''.join(work)
             while ';' in merged:
                 one, merged = merged.split(';', 1)
                 yield unescapearg(one)
             chunk = rsp.read(1024)
             work = [merged, chunk]
         yield unescapearg(''.join(work))

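Putting the pieces together, one batched round trip looks roughly like this sketch (actual escaping is done by wireprototypes.escapebatcharg/unescapebatcharg; the payloads here are placeholders):

    # Sent as the 'cmds' argument of the 'batch' command:
    #     'heads ;known nodes=1111 2222'
    # Raw ';'-separated response from the server:
    #     '<heads payload>;10'
    # _submitbatch() then yields one unescaped chunk per command:
    #     '<heads payload>'  -> decoded by the heads() coroutine
    #     '10'               -> decoded by the known() coroutine
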
     def _submitone(self, op, args):
         return self._call(op, **pycompat.strkwargs(args))

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         # don't pass optional arguments left at their default value
         opts = {}
         if three is not None:
             opts[r'three'] = three
         if four is not None:
             opts[r'four'] = four
         return self._call('debugwireargs', one=one, two=two, **opts)

     def _call(self, cmd, **args):
         """execute <cmd> on the server

         The command is expected to return a simple string.

         returns the server reply as a string."""
         raise NotImplementedError()

     def _callstream(self, cmd, **args):
         """execute <cmd> on the server

         The command is expected to return a stream. Note that if the
         command doesn't return a stream, _callstream behaves
         differently for ssh and http peers.

         returns the server reply as a file like object.
         """
         raise NotImplementedError()

     def _callcompressable(self, cmd, **args):
         """execute <cmd> on the server

         The command is expected to return a stream.

         The stream may have been compressed in some implementations. This
         function takes care of the decompression. This is the only difference
         with _callstream.

         returns the server reply as a file like object.
         """
         raise NotImplementedError()

     def _callpush(self, cmd, fp, **args):
         """execute a <cmd> on server

         The command is expected to be related to a push. Push has a special
         return method.

         returns the server reply as a (ret, output) tuple. ret is either
         empty (error) or a stringified int.
         """
         raise NotImplementedError()

     def _calltwowaystream(self, cmd, fp, **args):
         """execute <cmd> on server

         The command will send a stream to the server and get a stream in reply.
         """
         raise NotImplementedError()

     def _abort(self, exception):
         """clearly abort the wire protocol connection and raise the exception
         """
         raise NotImplementedError()
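The caller-visible effect of this change, which the test updates below exercise, is the move from the removed iterbatch() pattern to the command executor (a condensed sketch using only names that appear in this diff):

    # Before (removed in this change):
    b = peer.iterbatch()
    b.heads()
    b.known(nodes)
    b.submit()
    heads, known = list(b.results())

    # After:
    with peer.commandexecutor() as e:
        fheads = e.callcommand('heads', {})
        fknown = e.callcommand('known', {'nodes': nodes})
    heads, known = fheads.result(), fknown.result()
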
@@ -1,207 +1,180 @@
 # test-batching.py - tests for transparent command batching
 #
 # Copyright 2011 Peter Arrenbrecht <peter@arrenbrecht.ch>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import, print_function

+import contextlib
+
 from mercurial import (
-    error,
     localrepo,
-    util,
     wireprotov1peer,
 )

 # equivalent of repo.repository
 class thing(object):
     def hello(self):
         return "Ready."

 # equivalent of localrepo.localrepository
 class localthing(thing):
     def foo(self, one, two=None):
         if one:
             return "%s and %s" % (one, two,)
         return "Nope"
     def bar(self, b, a):
         return "%s und %s" % (b, a,)
     def greet(self, name=None):
         return "Hello, %s" % name
-    def batchiter(self):
-        '''Support for local batching.'''
-        return localrepo.localiterbatcher(self)
+
+    @contextlib.contextmanager
+    def commandexecutor(self):
+        e = localrepo.localcommandexecutor(self)
+        try:
+            yield e
+        finally:
+            e.close()

 # usage of "thing" interface
 def use(it):

     # Direct call to base method shared between client and server.
     print(it.hello())

     # Direct calls to proxied methods. They cause individual roundtrips.
     print(it.foo("Un", two="Deux"))
     print(it.bar("Eins", "Zwei"))

     # Batched call to a couple of proxied methods.
-    batch = it.batchiter()
-    # The calls return futures to eventually hold results.
-    foo = batch.foo(one="One", two="Two")
-    bar = batch.bar("Eins", "Zwei")
-    bar2 = batch.bar(b="Uno", a="Due")
-
-    # Future shouldn't be set until we submit().
-    assert isinstance(foo, wireprotov1peer.future)
-    assert not util.safehasattr(foo, 'value')
-    assert not util.safehasattr(bar, 'value')
-    batch.submit()
-    # Call results() to obtain results as a generator.
-    results = batch.results()
-
-    # Future results shouldn't be set until we consume a value.
-    assert not util.safehasattr(foo, 'value')
-    foovalue = next(results)
-    assert util.safehasattr(foo, 'value')
-    assert foovalue == foo.value
-    print(foo.value)
-    next(results)
-    print(bar.value)
-    next(results)
-    print(bar2.value)
-
-    # We should be at the end of the results generator.
-    try:
-        next(results)
-    except StopIteration:
-        print('proper end of results generator')
-    else:
-        print('extra emitted element!')
-
-    # Attempting to call a non-batchable method inside a batch fails.
-    batch = it.batchiter()
-    try:
-        batch.greet(name='John Smith')
-    except error.ProgrammingError as e:
-        print(e)
-
-    # Attempting to call a local method inside a batch fails.
-    batch = it.batchiter()
-    try:
-        batch.hello()
-    except error.ProgrammingError as e:
-        print(e)
+
+    with it.commandexecutor() as e:
+        ffoo = e.callcommand('foo', {'one': 'One', 'two': 'Two'})
+        fbar = e.callcommand('bar', {'b': 'Eins', 'a': 'Zwei'})
+        fbar2 = e.callcommand('bar', {'b': 'Uno', 'a': 'Due'})
+
+    print(ffoo.result())
+    print(fbar.result())
+    print(fbar2.result())

 # local usage
 mylocal = localthing()
 print()
 print("== Local")
 use(mylocal)

 # demo remoting; mimics what wireproto and HTTP/SSH do

 # shared

 def escapearg(plain):
     return (plain
             .replace(':', '::')
             .replace(',', ':,')
             .replace(';', ':;')
             .replace('=', ':='))
 def unescapearg(escaped):
     return (escaped
             .replace(':=', '=')
             .replace(':;', ';')
             .replace(':,', ',')
             .replace('::', ':'))

 # server side

 # equivalent of wireproto's global functions
 class server(object):
     def __init__(self, local):
         self.local = local
     def _call(self, name, args):
         args = dict(arg.split('=', 1) for arg in args)
         return getattr(self, name)(**args)
     def perform(self, req):
         print("REQ:", req)
         name, args = req.split('?', 1)
         args = args.split('&')
         vals = dict(arg.split('=', 1) for arg in args)
         res = getattr(self, name)(**vals)
         print(" ->", res)
         return res
     def batch(self, cmds):
         res = []
         for pair in cmds.split(';'):
             name, args = pair.split(':', 1)
             vals = {}
             for a in args.split(','):
                 if a:
                     n, v = a.split('=')
                     vals[n] = unescapearg(v)
             res.append(escapearg(getattr(self, name)(**vals)))
         return ';'.join(res)
     def foo(self, one, two):
         return mangle(self.local.foo(unmangle(one), unmangle(two)))
     def bar(self, b, a):
         return mangle(self.local.bar(unmangle(b), unmangle(a)))
     def greet(self, name):
         return mangle(self.local.greet(unmangle(name)))
 myserver = server(mylocal)

 # local side

 # equivalent of wireproto.encode/decodelist, that is, type-specific marshalling
 # here we just transform the strings a bit to check we're properly en-/decoding
 def mangle(s):
     return ''.join(chr(ord(c) + 1) for c in s)
 def unmangle(s):
     return ''.join(chr(ord(c) - 1) for c in s)

 # equivalent of wireproto.wirerepository and something like http's wire format
 class remotething(thing):
     def __init__(self, server):
         self.server = server
     def _submitone(self, name, args):
         req = name + '?' + '&'.join(['%s=%s' % (n, v) for n, v in args])
         return self.server.perform(req)
     def _submitbatch(self, cmds):
         req = []
         for name, args in cmds:
             args = ','.join(n + '=' + escapearg(v) for n, v in args)
             req.append(name + ':' + args)
         req = ';'.join(req)
         res = self._submitone('batch', [('cmds', req,)])
         for r in res.split(';'):
             yield r

-    def batchiter(self):
-        return wireprotov1peer.remoteiterbatcher(self)
+    @contextlib.contextmanager
+    def commandexecutor(self):
+        e = wireprotov1peer.peerexecutor(self)
+        try:
+            yield e
+        finally:
+            e.close()

     @wireprotov1peer.batchable
     def foo(self, one, two=None):
         encargs = [('one', mangle(one),), ('two', mangle(two),)]
         encresref = wireprotov1peer.future()
         yield encargs, encresref
         yield unmangle(encresref.value)

     @wireprotov1peer.batchable
     def bar(self, b, a):
         encresref = wireprotov1peer.future()
         yield [('b', mangle(b),), ('a', mangle(a),)], encresref
         yield unmangle(encresref.value)

     # greet is coded directly. It therefore does not support batching. If it
     # does appear in a batch, the batch is split around greet, and the call to
     # greet is done in its own roundtrip.
     def greet(self, name=None):
         return unmangle(self._submitone('greet', [('name', mangle(name),)]))

 # demo remote usage

 myproxy = remotething(myserver)
 print()
 print("== Remote")
 use(myproxy)
@@ -1,26 +1,22 @@
 
 == Local
 Ready.
 Un and Deux
 Eins und Zwei
 One and Two
 Eins und Zwei
 Uno und Due
-proper end of results generator
 
 == Remote
 Ready.
 REQ: foo?one=Vo&two=Efvy
  -> Vo!boe!Efvy
 Un and Deux
 REQ: bar?b=Fjot&a=[xfj
  -> Fjot!voe![xfj
 Eins und Zwei
 REQ: batch?cmds=foo:one=Pof,two=Uxp;bar:b=Fjot,a=[xfj;bar:b=Vop,a=Evf
  -> Pof!boe!Uxp;Fjot!voe![xfj;Vop!voe!Evf
 One and Two
 Eins und Zwei
 Uno und Due
-proper end of results generator
-Attempted to batch a non-batchable call to 'greet'
-Attempted to batch a non-batchable call to 'hello'
@@ -1,99 +1,101 @@
 from __future__ import absolute_import, print_function

 from mercurial import (
     error,
     pycompat,
     ui as uimod,
     util,
     wireproto,
     wireprototypes,
     wireprotov1peer,
 )
 stringio = util.stringio

 class proto(object):
     def __init__(self, args):
         self.args = args
         self.name = 'dummyproto'

     def getargs(self, spec):
         args = self.args
         args.setdefault(b'*', {})
         names = spec.split()
         return [args[n] for n in names]

     def checkperm(self, perm):
         pass

 wireprototypes.TRANSPORTS['dummyproto'] = {
     'transport': 'dummy',
     'version': 1,
 }

 class clientpeer(wireprotov1peer.wirepeer):
     def __init__(self, serverrepo, ui):
         self.serverrepo = serverrepo
         self.ui = ui

     def url(self):
         return b'test'

     def local(self):
         return None

     def peer(self):
         return self

     def canpush(self):
         return True

     def close(self):
         pass

     def capabilities(self):
         return [b'batch']

     def _call(self, cmd, **args):
         args = pycompat.byteskwargs(args)
         res = wireproto.dispatch(self.serverrepo, proto(args), cmd)
         if isinstance(res, wireprototypes.bytesresponse):
             return res.data
         elif isinstance(res, bytes):
             return res
         else:
             raise error.Abort('dummy client does not support response type')

     def _callstream(self, cmd, **args):
         return stringio(self._call(cmd, **args))

     @wireprotov1peer.batchable
     def greet(self, name):
         f = wireprotov1peer.future()
         yield {b'name': mangle(name)}, f
         yield unmangle(f.value)

 class serverrepo(object):
     def greet(self, name):
         return b"Hello, " + name

     def filtered(self, name):
         return self

 def mangle(s):
     return b''.join(pycompat.bytechr(ord(c) + 1) for c in pycompat.bytestr(s))
 def unmangle(s):
     return b''.join(pycompat.bytechr(ord(c) - 1) for c in pycompat.bytestr(s))

 def greet(repo, proto, name):
     return mangle(repo.greet(unmangle(name)))

 wireproto.commands[b'greet'] = (greet, b'name',)

 srv = serverrepo()
 clt = clientpeer(srv, uimod.ui())

 print(clt.greet(b"Foobar"))
-b = clt.iterbatch()
-list(map(b.greet, (b'Fo, =;:<o', b'Bar')))
-b.submit()
-print([r for r in b.results()])
+
+with clt.commandexecutor() as e:
+    fgreet1 = e.callcommand(b'greet', {b'name': b'Fo, =;:<o'})
+    fgreet2 = e.callcommand(b'greet', {b'name': b'Bar'})
+
+print([f.result() for f in (fgreet1, fgreet2)])