localrepo: support marking repos as having shallow file storage...
Gregory Szorc
r40426:7e3b6c4f default
# sqlitestore.py - Storage backend that uses SQLite
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""store repository data in SQLite (EXPERIMENTAL)

The sqlitestore extension enables the storage of repository data in SQLite.

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

In addition, several features are not yet supported or have known bugs:

* Only some data is stored in SQLite. Changeset, manifest, and other repository
  data is not yet stored in SQLite.
* Transactions are not robust. If the process is aborted at the right time
  during transaction close/rollback, the repository could be in an inconsistent
  state. This problem will diminish once all repository data is tracked by
  SQLite.
* Bundle repositories do not work (the ability to use e.g.
  `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
  existing repository).
* Various other features don't work.

This extension should work for basic clone/pull, update, and commit workflows.
Some history rewriting operations may fail due to lack of support for bundle
repositories.

To use, activate the extension and set the ``storage.new-repo-backend`` config
option to ``sqlite`` to enable new repositories to use SQLite for storage.
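
For example, a minimal configuration along these lines (a sketch built only
from the extension and option names above) opts new repositories into SQLite
storage::

  [extensions]
  sqlitestore =

  [storage]
  new-repo-backend = sqlite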
37 """
37 """
38
38
39 # To run the test suite with repos using SQLite by default, execute the
39 # To run the test suite with repos using SQLite by default, execute the
40 # following:
40 # following:
41 #
41 #
42 # HGREPOFEATURES="sqlitestore" run-tests.py \
42 # HGREPOFEATURES="sqlitestore" run-tests.py \
43 # --extra-config-opt extensions.sqlitestore= \
43 # --extra-config-opt extensions.sqlitestore= \
44 # --extra-config-opt storage.new-repo-backend=sqlite
44 # --extra-config-opt storage.new-repo-backend=sqlite
45
45
46 from __future__ import absolute_import
46 from __future__ import absolute_import
47
47
48 import hashlib
48 import hashlib
49 import sqlite3
49 import sqlite3
50 import struct
50 import struct
51 import threading
51 import threading
52 import zlib
52 import zlib
53
53
54 from mercurial.i18n import _
54 from mercurial.i18n import _
55 from mercurial.node import (
55 from mercurial.node import (
56 nullid,
56 nullid,
57 nullrev,
57 nullrev,
58 short,
58 short,
59 )
59 )
60 from mercurial.thirdparty import (
60 from mercurial.thirdparty import (
61 attr,
61 attr,
62 )
62 )
63 from mercurial import (
63 from mercurial import (
64 ancestor,
64 ancestor,
65 dagop,
65 dagop,
66 error,
66 error,
67 extensions,
67 extensions,
68 localrepo,
68 localrepo,
69 mdiff,
69 mdiff,
70 pycompat,
70 pycompat,
71 registrar,
71 registrar,
72 repository,
72 repository,
73 util,
73 util,
74 verify,
74 verify,
75 )
75 )
76 from mercurial.utils import (
76 from mercurial.utils import (
77 interfaceutil,
77 interfaceutil,
78 storageutil,
78 storageutil,
79 )
79 )
80
80
81 try:
81 try:
82 from mercurial import zstd
82 from mercurial import zstd
83 zstd.__version__
83 zstd.__version__
84 except ImportError:
84 except ImportError:
85 zstd = None
85 zstd = None
86
86
87 configtable = {}
87 configtable = {}
88 configitem = registrar.configitem(configtable)
88 configitem = registrar.configitem(configtable)
89
89
90 # experimental config: storage.sqlite.compression
90 # experimental config: storage.sqlite.compression
91 configitem('storage', 'sqlite.compression',
91 configitem('storage', 'sqlite.compression',
92 default='zstd' if zstd else 'zlib')
92 default='zstd' if zstd else 'zlib')
93
93
94 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
94 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
95 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
95 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
96 # be specifying the version(s) of Mercurial they are tested with, or
96 # be specifying the version(s) of Mercurial they are tested with, or
97 # leave the attribute unspecified.
97 # leave the attribute unspecified.
98 testedwith = 'ships-with-hg-core'
98 testedwith = 'ships-with-hg-core'
99
99
100 REQUIREMENT = b'exp-sqlite-001'
100 REQUIREMENT = b'exp-sqlite-001'
101 REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
101 REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
102 REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
102 REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
103 REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
103 REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
104 REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'
104
105
105 CURRENT_SCHEMA_VERSION = 1
106 CURRENT_SCHEMA_VERSION = 1
106
107
107 COMPRESSION_NONE = 1
108 COMPRESSION_NONE = 1
108 COMPRESSION_ZSTD = 2
109 COMPRESSION_ZSTD = 2
109 COMPRESSION_ZLIB = 3
110 COMPRESSION_ZLIB = 3
110
111
111 FLAG_CENSORED = 1
112 FLAG_CENSORED = 1
112
113
113 CREATE_SCHEMA = [
114 CREATE_SCHEMA = [
114 # Deltas are stored as content-indexed blobs.
115 # Deltas are stored as content-indexed blobs.
115 # compression column holds COMPRESSION_* constant for how the
116 # compression column holds COMPRESSION_* constant for how the
116 # delta is encoded.
117 # delta is encoded.
117
118
118 r'CREATE TABLE delta ('
119 r'CREATE TABLE delta ('
119 r' id INTEGER PRIMARY KEY, '
120 r' id INTEGER PRIMARY KEY, '
120 r' compression INTEGER NOT NULL, '
121 r' compression INTEGER NOT NULL, '
121 r' hash BLOB UNIQUE ON CONFLICT ABORT, '
122 r' hash BLOB UNIQUE ON CONFLICT ABORT, '
122 r' delta BLOB NOT NULL '
123 r' delta BLOB NOT NULL '
123 r')',
124 r')',
124
125
125 # Tracked paths are denormalized to integers to avoid redundant
126 # Tracked paths are denormalized to integers to avoid redundant
126 # storage of the path name.
127 # storage of the path name.
127 r'CREATE TABLE filepath ('
128 r'CREATE TABLE filepath ('
128 r' id INTEGER PRIMARY KEY, '
129 r' id INTEGER PRIMARY KEY, '
129 r' path BLOB NOT NULL '
130 r' path BLOB NOT NULL '
130 r')',
131 r')',
131
132
132 r'CREATE UNIQUE INDEX filepath_path '
133 r'CREATE UNIQUE INDEX filepath_path '
133 r' ON filepath (path)',
134 r' ON filepath (path)',
134
135
135 # We have a single table for all file revision data.
136 # We have a single table for all file revision data.
136 # Each file revision is uniquely described by a (path, rev) and
137 # Each file revision is uniquely described by a (path, rev) and
137 # (path, node).
138 # (path, node).
138 #
139 #
139 # Revision data is stored as a pointer to the delta producing this
140 # Revision data is stored as a pointer to the delta producing this
140 # revision and the file revision whose delta should be applied before
141 # revision and the file revision whose delta should be applied before
141 # that one. One can reconstruct the delta chain by recursively following
142 # that one. One can reconstruct the delta chain by recursively following
142 # the delta base revision pointers until one encounters NULL.
143 # the delta base revision pointers until one encounters NULL.
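    #
    # As a hypothetical illustration of that walk: if revision C's row
    # references delta 3 with its base pointing at B's row, B's row
    # references delta 2 with its base pointing at A's row, and A's base
    # pointer is NULL, then C's fulltext is A's delta (a full snapshot,
    # since chain-terminal "deltas" store the full revision) patched with
    # B's delta and then with C's.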
    #
    # flags column holds bitwise integer flags controlling storage options.
    # These flags are defined by the FLAG_* constants.
    r'CREATE TABLE fileindex ('
    r'    id INTEGER PRIMARY KEY, '
    r'    pathid INTEGER REFERENCES filepath(id), '
    r'    revnum INTEGER NOT NULL, '
    r'    p1rev INTEGER NOT NULL, '
    r'    p2rev INTEGER NOT NULL, '
    r'    linkrev INTEGER NOT NULL, '
    r'    flags INTEGER NOT NULL, '
    r'    deltaid INTEGER REFERENCES delta(id), '
    r'    deltabaseid INTEGER REFERENCES fileindex(id), '
    r'    node BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX fileindex_pathrevnum '
    r'    ON fileindex (pathid, revnum)',

    r'CREATE UNIQUE INDEX fileindex_pathnode '
    r'    ON fileindex (pathid, node)',

    # Provide a view over all file data for convenience.
    r'CREATE VIEW filedata AS '
    r'SELECT '
    r'    fileindex.id AS id, '
    r'    filepath.id AS pathid, '
    r'    filepath.path AS path, '
    r'    fileindex.revnum AS revnum, '
    r'    fileindex.node AS node, '
    r'    fileindex.p1rev AS p1rev, '
    r'    fileindex.p2rev AS p2rev, '
    r'    fileindex.linkrev AS linkrev, '
    r'    fileindex.flags AS flags, '
    r'    fileindex.deltaid AS deltaid, '
    r'    fileindex.deltabaseid AS deltabaseid '
    r'FROM filepath, fileindex '
    r'WHERE fileindex.pathid=filepath.id',

    r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
]

def resolvedeltachain(db, pathid, node, revisioncache,
                      stoprids, zstddctx=None):
    """Resolve a delta chain for a file node."""

    # TODO the "not in ({stops})" here is possibly slowing down the query
    # because it needs to perform the lookup on every recursive invocation.
    # This could possibly be faster if we created a temporary query with
    # baseid "poisoned" to null and limited the recursive filter to
    # "is not null".
    res = db.execute(
        r'WITH RECURSIVE '
        r'    deltachain(deltaid, baseid) AS ('
        r'        SELECT deltaid, deltabaseid FROM fileindex '
        r'            WHERE pathid=? AND node=? '
        r'        UNION ALL '
        r'        SELECT fileindex.deltaid, deltabaseid '
        r'            FROM fileindex, deltachain '
        r'            WHERE '
        r'                fileindex.id=deltachain.baseid '
        r'                AND deltachain.baseid IS NOT NULL '
        r'                AND fileindex.id NOT IN ({stops}) '
        r'    ) '
        r'SELECT deltachain.baseid, compression, delta '
        r'FROM deltachain, delta '
        r'WHERE delta.id=deltachain.deltaid'.format(
            stops=r','.join([r'?'] * len(stoprids))),
        tuple([pathid, node] + list(stoprids.keys())))

    deltas = []
    lastdeltabaseid = None

    for deltabaseid, compression, delta in res:
        lastdeltabaseid = deltabaseid

        if compression == COMPRESSION_ZSTD:
            delta = zstddctx.decompress(delta)
        elif compression == COMPRESSION_NONE:
            delta = delta
        elif compression == COMPRESSION_ZLIB:
            delta = zlib.decompress(delta)
        else:
            raise SQLiteStoreError('unhandled compression type: %d' %
                                   compression)

        deltas.append(delta)

    if lastdeltabaseid in stoprids:
        basetext = revisioncache[stoprids[lastdeltabaseid]]
    else:
        basetext = deltas.pop()

    deltas.reverse()
    fulltext = mdiff.patches(basetext, deltas)

    # SQLite returns buffer instances for blob columns on Python 2. This
    # type can propagate through the delta application layer. Because
    # downstream callers assume revisions are bytes, cast as needed.
    if not isinstance(fulltext, bytes):
        fulltext = bytes(fulltext)

    return fulltext

def insertdelta(db, compression, hash, delta):
    try:
        return db.execute(
            r'INSERT INTO delta (compression, hash, delta) '
            r'VALUES (?, ?, ?)',
            (compression, hash, delta)).lastrowid
    except sqlite3.IntegrityError:
        return db.execute(
            r'SELECT id FROM delta WHERE hash=?',
            (hash,)).fetchone()[0]

class SQLiteStoreError(error.StorageError):
    pass

@attr.s
class revisionentry(object):
    rid = attr.ib()
    rev = attr.ib()
    node = attr.ib()
    p1rev = attr.ib()
    p2rev = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    linkrev = attr.ib()
    flags = attr.ib()

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class sqliterevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)

@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class sqliteproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

@interfaceutil.implementer(repository.ifilestorage)
class sqlitefilestore(object):
    """Implements storage for an individual tracked path."""

    def __init__(self, db, path, compression):
        self._db = db
        self._path = path

        self._pathid = None

        # revnum -> node
        self._revtonode = {}
        # node -> revnum
        self._nodetorev = {}
        # node -> data structure
        self._revisions = {}

        self._revisioncache = util.lrucachedict(10)

        self._compengine = compression

        if compression == 'zstd':
            self._cctx = zstd.ZstdCompressor(level=3)
            self._dctx = zstd.ZstdDecompressor()
        else:
            self._cctx = None
            self._dctx = None

        self._refreshindex()

    def _refreshindex(self):
        self._revtonode = {}
        self._nodetorev = {}
        self._revisions = {}

        res = list(self._db.execute(
            r'SELECT id FROM filepath WHERE path=?', (self._path,)))

        if not res:
            self._pathid = None
            return

        self._pathid = res[0][0]

        res = self._db.execute(
            r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
            r'FROM fileindex '
            r'WHERE pathid=? '
            r'ORDER BY revnum ASC',
            (self._pathid,))

        for i, row in enumerate(res):
            rid, rev, node, p1rev, p2rev, linkrev, flags = row

            if i != rev:
                raise SQLiteStoreError(_('sqlite database has inconsistent '
                                         'revision numbers'))

            if p1rev == nullrev:
                p1node = nullid
            else:
                p1node = self._revtonode[p1rev]

            if p2rev == nullrev:
                p2node = nullid
            else:
                p2node = self._revtonode[p2rev]

            entry = revisionentry(
                rid=rid,
                rev=rev,
                node=node,
                p1rev=p1rev,
                p2rev=p2rev,
                p1node=p1node,
                p2node=p2node,
                linkrev=linkrev,
                flags=flags)

            self._revtonode[rev] = node
            self._nodetorev[node] = rev
            self._revisions[node] = entry

    # Start of ifileindex interface.

    def __len__(self):
        return len(self._revisions)

    def __iter__(self):
        return iter(pycompat.xrange(len(self._revisions)))

    def hasnode(self, node):
        if node == nullid:
            return False

        return node in self._nodetorev

    def revs(self, start=0, stop=None):
        return storageutil.iterrevs(len(self._revisions), start=start,
                                    stop=stop)

    def parents(self, node):
        if node == nullid:
            return nullid, nullid

        if node not in self._revisions:
            raise error.LookupError(node, self._path, _('no node'))

        entry = self._revisions[node]
        return entry.p1node, entry.p2node

    def parentrevs(self, rev):
        if rev == nullrev:
            return nullrev, nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.p1rev, entry.p2rev

    def rev(self, node):
        if node == nullid:
            return nullrev

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        return self._nodetorev[node]

    def node(self, rev):
        if rev == nullrev:
            return nullid

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revtonode[rev]

    def lookup(self, node):
        return storageutil.fileidlookup(self, node, self._path)

    def linkrev(self, rev):
        if rev == nullrev:
            return nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.linkrev

    def iscensored(self, rev):
        if rev == nullrev:
            return False

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED

    def commonancestorsheads(self, node1, node2):
        rev1 = self.rev(node1)
        rev2 = self.rev(node2)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # TODO we could implement this using a recursive SQL query, which
        # might be faster.
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def heads(self, start=None, stop=None):
        if start is None and stop is None:
            if not len(self):
                return [nullid]

        startrev = self.rev(start) if start is not None else nullrev
        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(self.revs, self.parentrevs,
                                    startrev=startrev, stoprevs=stoprevs)

        return [self.node(rev) for rev in revs]

    def children(self, node):
        rev = self.rev(node)

        res = self._db.execute(
            r'SELECT'
            r'  node '
            r'  FROM filedata '
            r'  WHERE path=? AND (p1rev=? OR p2rev=?) '
            r'  ORDER BY revnum ASC',
            (self._path, rev, rev))

        return [row[0] for row in res]

    # End of ifileindex interface.

    # Start of ifiledata interface.

    def size(self, rev):
        if rev == nullrev:
            return 0

        if rev not in self._revtonode:
            raise IndexError(rev)

        node = self._revtonode[rev]

        if self.renamed(node):
            return len(self.read(node))

        return len(self.revision(node))

    def revision(self, node, raw=False, _verifyhash=True):
        if node in (nullid, nullrev):
            return b''

        if isinstance(node, int):
            node = self.node(node)

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        if node in self._revisioncache:
            return self._revisioncache[node]

        # Because we have a fulltext revision cache, we are able to
        # short-circuit delta chain traversal and decompression as soon as
        # we encounter a revision in the cache.
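        #
        # For example, if one of the cached nodes appears in this node's
        # delta chain, resolvedeltachain() stops at that row and uses the
        # cached fulltext as the base text instead of walking all the way
        # back to a full snapshot.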

        stoprids = {self._revisions[n].rid: n
                    for n in self._revisioncache}

        if not stoprids:
            stoprids[-1] = None
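        # (-1 can never match a fileindex id, so the sentinel simply keeps
        # the generated "NOT IN (...)" clause in resolvedeltachain() from
        # being empty.)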

        fulltext = resolvedeltachain(self._db, self._pathid, node,
                                     self._revisioncache, stoprids,
                                     zstddctx=self._dctx)

        if _verifyhash:
            self._checkhash(fulltext, node)
            self._revisioncache[node] = fulltext

        return fulltext

    def read(self, node):
        return storageutil.filtermetadata(self.revision(node))

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def cmp(self, node, fulltext):
        return not storageutil.filedataequivalent(self, node, fulltext)

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False, deltaprevious=False):
        if nodesorder not in ('nodes', 'storage', None):
            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
                                         nodesorder)

        nodes = [n for n in nodes if n != nullid]

        if not nodes:
            return

        # TODO perform in a single query.
        res = self._db.execute(
            r'SELECT revnum, deltaid FROM fileindex '
            r'WHERE pathid=? '
            r'    AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
            tuple([self._pathid] + nodes))

        deltabases = {}

        for rev, deltaid in res:
            res = self._db.execute(
                r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
                (self._pathid, deltaid))
            deltabases[rev] = res.fetchone()[0]

        # TODO define revdifffn so we can use delta from storage.
        for delta in storageutil.emitrevisions(
            self, nodes, nodesorder, sqliterevisiondelta,
            deltaparentfn=deltabases.__getitem__,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltaprevious=deltaprevious):

            yield delta

    # End of ifiledata interface.

    # Start of ifilemutation interface.

    def add(self, filedata, meta, transaction, linkrev, p1, p2):
        if meta or filedata.startswith(b'\x01\n'):
            filedata = storageutil.packmeta(meta, filedata)

        return self.addrevision(filedata, transaction, linkrev, p1, p2)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        if flags:
            raise SQLiteStoreError(_('flags not supported on revisions'))

        validatehash = node is not None
        node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)

        if validatehash:
            self._checkhash(revisiondata, node, p1, p2)

        if node in self._nodetorev:
            return node

        node = self._addrawrevision(node, revisiondata, transaction, linkrev,
                                    p1, p2)

        self._revisioncache[node] = revisiondata
        return node

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
                 maybemissingparents=False):
        if maybemissingparents:
            raise error.Abort(_('SQLite storage does not support missing '
                                'parents write mode'))

        nodes = []

        for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
            storeflags = 0

            if wireflags & repository.REVISION_FLAG_CENSORED:
                storeflags |= FLAG_CENSORED

            if wireflags & ~repository.REVISION_FLAG_CENSORED:
                raise SQLiteStoreError('unhandled revision flag')

            baserev = self.rev(deltabase)

            # If base is censored, delta must be full replacement in a single
            # patch operation.
            if baserev != nullrev and self.iscensored(baserev):
                hlen = struct.calcsize('>lll')
                oldlen = len(self.revision(deltabase, raw=True,
                                           _verifyhash=False))
                newlen = len(delta) - hlen

                if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                    raise error.CensoredBaseError(self._path,
                                                  deltabase)

            if (not (storeflags & FLAG_CENSORED)
                and storageutil.deltaiscensored(
                    delta, baserev, lambda x: len(self.revision(x, raw=True)))):
                storeflags |= FLAG_CENSORED

            linkrev = linkmapper(linknode)

            nodes.append(node)

            if node in self._revisions:
                continue

            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
                storedelta = None
            else:
                text = None
                storedelta = (deltabase, delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 storedelta=storedelta, flags=storeflags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes

    def censorrevision(self, tr, censornode, tombstone=b''):
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        # This restriction is cargo culted from revlogs and makes no sense for
        # SQLite, since columns can be resized at will.
        if len(tombstone) > len(self.revision(censornode, raw=True)):
            raise error.Abort(_('censor tombstone must be no longer than '
                                'censored data'))

        # We need to replace the censored revision's data with the tombstone.
        # But replacing that data will have implications for delta chains that
        # reference it.
        #
        # While "better," more complex strategies are possible, we do something
        # simple: we find delta chain children of the censored revision and we
        # replace those incremental deltas with fulltexts of their corresponding
        # revision. Then we delete the now-unreferenced delta and original
        # revision and insert a replacement.

        # Find the delta to be censored.
        censoreddeltaid = self._db.execute(
            r'SELECT deltaid FROM fileindex WHERE id=?',
            (self._revisions[censornode].rid,)).fetchone()[0]

        # Find all its delta chain children.
        # TODO once we support storing deltas for !files, we'll need to look
        # for those delta chains too.
        rows = list(self._db.execute(
            r'SELECT id, pathid, node FROM fileindex '
            r'WHERE deltabaseid=? OR deltaid=?',
            (censoreddeltaid, censoreddeltaid)))

        for row in rows:
            rid, pathid, node = row

            fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
                                         zstddctx=self._dctx)

            deltahash = hashlib.sha1(fulltext).digest()

            if self._compengine == 'zstd':
                deltablob = self._cctx.compress(fulltext)
                compression = COMPRESSION_ZSTD
            elif self._compengine == 'zlib':
                deltablob = zlib.compress(fulltext)
                compression = COMPRESSION_ZLIB
            elif self._compengine == 'none':
                deltablob = fulltext
                compression = COMPRESSION_NONE
            else:
                raise error.ProgrammingError('unhandled compression engine: %s'
                                             % self._compengine)

            if len(deltablob) >= len(fulltext):
                deltablob = fulltext
                compression = COMPRESSION_NONE

            deltaid = insertdelta(self._db, compression, deltahash, deltablob)

            self._db.execute(
                r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
                r'WHERE id=?', (deltaid, rid))

        # Now create the tombstone delta and replace the delta on the censored
        # node.
        deltahash = hashlib.sha1(tombstone).digest()
        tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
                                       deltahash, tombstone)

        flags = self._revisions[censornode].flags
        flags |= FLAG_CENSORED

        self._db.execute(
            r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
            r'WHERE pathid=? AND node=?',
            (flags, tombstonedeltaid, self._pathid, censornode))

        self._db.execute(
            r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))

        self._refreshindex()
        self._revisioncache.clear()

    def getstrippoint(self, minlink):
        return storageutil.resolvestripinfo(minlink, len(self) - 1,
                                            [self.rev(n) for n in self.heads()],
                                            self.linkrev,
                                            self.parentrevs)

    def strip(self, minlink, transaction):
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)

        if rev == len(self):
            return

        for rev in self.revs(rev):
            self._db.execute(
                r'DELETE FROM fileindex WHERE pathid=? AND node=?',
                (self._pathid, self.node(rev)))

        # TODO how should we garbage collect data in delta table?

        self._refreshindex()

    # End of ifilemutation interface.

    # Start of ifilestorage interface.

    def files(self):
        return []

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        d = {}

        if exclusivefiles:
            d['exclusivefiles'] = []

        if sharedfiles:
            # TODO list sqlite file(s) here.
            d['sharedfiles'] = []

        if revisionscount:
            d['revisionscount'] = len(self)

        if trackedsize:
            d['trackedsize'] = sum(len(self.revision(node))
                                   for node in self._nodetorev)

        if storedsize:
            # TODO implement this?
            d['storedsize'] = None

        return d

    def verifyintegrity(self, state):
        state['skipread'] = set()

        for rev in self:
            node = self.node(rev)

            try:
                self.revision(node)
            except Exception as e:
                yield sqliteproblem(
                    error=_('unpacking %s: %s') % (short(node), e),
                    node=node)

                state['skipread'].add(node)

    # End of ifilestorage interface.

    def _checkhash(self, fulltext, node, p1=None, p2=None):
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)

        if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
            return

        try:
            del self._revisioncache[node]
        except KeyError:
            pass

        if storageutil.iscensoredtext(fulltext):
            raise error.CensoredNodeError(self._path, node, fulltext)

        raise SQLiteStoreError(_('integrity check failed on %s') %
                               self._path)

    def _addrawrevision(self, node, revisiondata, transaction, linkrev,
                        p1, p2, storedelta=None, flags=0):
        if self._pathid is None:
            res = self._db.execute(
                r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
            self._pathid = res.lastrowid

        # For simplicity, always store a delta against p1.
        # TODO we need a lot more logic here to make behavior reasonable.

        if storedelta:
            deltabase, delta = storedelta

            if isinstance(deltabase, int):
                deltabase = self.node(deltabase)

        else:
            assert revisiondata is not None
            deltabase = p1

            if deltabase == nullid:
                delta = revisiondata
            else:
                delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
                                       revisiondata)

        # File index stores a pointer to its delta and the parent delta.
        # The parent delta is stored via a pointer to the fileindex PK.
        if deltabase == nullid:
            baseid = None
        else:
            baseid = self._revisions[deltabase].rid

        # Deltas are stored with a hash of their content. This allows
        # us to de-duplicate. The table is configured to ignore conflicts
        # and it is faster to just insert and silently noop than to look
        # first.
        deltahash = hashlib.sha1(delta).digest()

        if self._compengine == 'zstd':
            deltablob = self._cctx.compress(delta)
            compression = COMPRESSION_ZSTD
        elif self._compengine == 'zlib':
            deltablob = zlib.compress(delta)
            compression = COMPRESSION_ZLIB
        elif self._compengine == 'none':
            deltablob = delta
            compression = COMPRESSION_NONE
        else:
            raise error.ProgrammingError('unhandled compression engine: %s' %
                                         self._compengine)

        # Don't store compressed data if it isn't practical.
        if len(deltablob) >= len(delta):
            deltablob = delta
            compression = COMPRESSION_NONE

        deltaid = insertdelta(self._db, compression, deltahash, deltablob)

        rev = len(self)

        if p1 == nullid:
            p1rev = nullrev
        else:
            p1rev = self._nodetorev[p1]

        if p2 == nullid:
            p2rev = nullrev
        else:
            p2rev = self._nodetorev[p2]

        rid = self._db.execute(
            r'INSERT INTO fileindex ('
            r'    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
            r'    deltaid, deltabaseid) '
            r'    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
            (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
             deltaid, baseid)
        ).lastrowid

        entry = revisionentry(
            rid=rid,
            rev=rev,
            node=node,
            p1rev=p1rev,
            p2rev=p2rev,
            p1node=p1,
            p2node=p2,
            linkrev=linkrev,
            flags=flags)

        self._nodetorev[node] = rev
        self._revtonode[rev] = node
        self._revisions[node] = entry

        return node

class sqliterepository(localrepo.localrepository):
    def cancopy(self):
        return False

    def transaction(self, *args, **kwargs):
        current = self.currenttransaction()

        tr = super(sqliterepository, self).transaction(*args, **kwargs)

        if current:
            return tr

        self._dbconn.execute(r'BEGIN TRANSACTION')

        def committransaction(_):
            self._dbconn.commit()

        tr.addfinalize('sqlitestore', committransaction)

        return tr

    @property
    def _dbconn(self):
        # SQLite connections can only be used on the thread that created
        # them. In most cases, this "just works." However, hgweb uses
        # multiple threads.
        tid = threading.current_thread().ident

        if self._db:
            if self._db[0] == tid:
                return self._db[1]

        db = makedb(self.svfs.join('db.sqlite'))
        self._db = (tid, db)

        return db

def makedb(path):
    """Construct a database handle for a database at path."""

    db = sqlite3.connect(path)
    db.text_factory = bytes

    res = db.execute(r'PRAGMA user_version').fetchone()[0]

    # New database.
    if res == 0:
        for statement in CREATE_SCHEMA:
            db.execute(statement)

        db.commit()

    elif res == CURRENT_SCHEMA_VERSION:
        pass

    else:
        raise error.Abort(_('sqlite database has unrecognized version'))
1005 db.execute(r'PRAGMA journal_mode=WAL')
1006 db.execute(r'PRAGMA journal_mode=WAL')
1006
1007
1007 return db
1008 return db
1008
1009
def featuresetup(ui, supported):
    supported.add(REQUIREMENT)

    if zstd:
        supported.add(REQUIREMENT_ZSTD)

    supported.add(REQUIREMENT_ZLIB)
    supported.add(REQUIREMENT_NONE)
+   supported.add(REQUIREMENT_SHALLOW_FILES)
+   supported.add(repository.NARROW_REQUIREMENT)
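
featuresetup() registers the extension's requirements as supported so that repositories carrying them can be opened; opening fails when any on-disk requirement is missing from the supported set. A hedged, standalone sketch of that check (the requirement strings here are assumptions for illustration, not necessarily the extension's exact values):

ondisk = {'revlogv1', 'exp-sqlite-001', 'exp-sqlite-shallow-files'}
supported = {'revlogv1', 'exp-sqlite-001'}  # e.g. extension not fully loaded

missing = ondisk - supported
if missing:
    raise RuntimeError('repository requires features unknown to this '
                       'Mercurial: %s' % ', '.join(sorted(missing)))
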
def newreporequirements(orig, ui, createopts):
    if createopts['backend'] != 'sqlite':
        return orig(ui, createopts)

    # This restriction can be lifted once we have more confidence.
    if 'sharedrepo' in createopts:
        raise error.Abort(_('shared repositories not supported with SQLite '
                            'store'))

    # This filtering is out of an abundance of caution: we want to ensure
    # we honor creation options and we do that by annotating exactly the
    # creation options we recognize.
    known = {
        'narrowfiles',
        'backend',
+       'shallowfilestore',
    }

    unsupported = set(createopts) - known
    if unsupported:
        raise error.Abort(_('SQLite store does not support repo creation '
                            'option: %s') % ', '.join(sorted(unsupported)))

    # Since we're a hybrid store that still relies on revlogs, we fall back
    # to using the revlogv1 backend's storage requirements then adding our
    # own requirement.
    createopts['backend'] = 'revlogv1'
    requirements = orig(ui, createopts)
    requirements.add(REQUIREMENT)

    compression = ui.config('storage', 'sqlite.compression')

    if compression == 'zstd' and not zstd:
        raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
                            'zstandard compression not available to this '
                            'Mercurial install'))

    if compression == 'zstd':
        requirements.add(REQUIREMENT_ZSTD)
    elif compression == 'zlib':
        requirements.add(REQUIREMENT_ZLIB)
    elif compression == 'none':
        requirements.add(REQUIREMENT_NONE)
    else:
        raise error.Abort(_('unknown compression engine defined in '
                            'storage.sqlite.compression: %s') % compression)

+   if createopts.get('shallowfilestore'):
+       requirements.add(REQUIREMENT_SHALLOW_FILES)
+
    return requirements
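
Putting newreporequirements() together: for a hypothetical shallow, zlib-compressed SQLite repo, the returned set combines the revlogv1 backend's requirements with the extension's own. The strings below are assumed values for illustration; the real ones are the module-level REQUIREMENT* constants defined earlier in this file.

# Hedged sketch of the resulting set; 'revlogv1' stands in for whatever the
# wrapped orig() returns for the fallback backend.
requirements = {
    'revlogv1',                   # from orig(ui, createopts)
    'exp-sqlite-001',             # REQUIREMENT (value assumed)
    'exp-sqlite-comp-001=zlib',   # REQUIREMENT_ZLIB (value assumed)
    'exp-sqlite-shallow-files',   # REQUIREMENT_SHALLOW_FILES (value assumed)
}
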
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class sqlitefilestorage(object):
    """Repository file storage backed by SQLite."""
    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        if REQUIREMENT_ZSTD in self.requirements:
            compression = 'zstd'
        elif REQUIREMENT_ZLIB in self.requirements:
            compression = 'zlib'
        elif REQUIREMENT_NONE in self.requirements:
            compression = 'none'
        else:
            raise error.Abort(_('unable to determine what compression engine '
                                'to use for SQLite storage'))

        return sqlitefilestore(self._dbconn, path, compression)

-def makefilestorage(orig, requirements, **kwargs):
+def makefilestorage(orig, requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    if REQUIREMENT in requirements:
+       if REQUIREMENT_SHALLOW_FILES in requirements:
+           features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)
+
        return sqlitefilestorage
    else:
-       return orig(requirements=requirements, **kwargs)
+       return orig(requirements=requirements, features=features, **kwargs)
def makemain(orig, ui, requirements, **kwargs):
    if REQUIREMENT in requirements:
        if REQUIREMENT_ZSTD in requirements and not zstd:
            raise error.Abort(_('repository uses zstandard compression, which '
                                'is not available to this Mercurial install'))

        return sqliterepository

    return orig(requirements=requirements, **kwargs)

def verifierinit(orig, self, *args, **kwargs):
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False

def extsetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(localrepo, 'makefilestorage',
                            makefilestorage)
    extensions.wrapfunction(localrepo, 'makemain',
                            makemain)
    extensions.wrapfunction(verify.verifier, '__init__',
                            verifierinit)
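
extsetup() relies on extensions.wrapfunction(), whose contract is that the wrapper receives the original function as its first argument and decides whether to delegate. For readers unfamiliar with the helper, here is a minimal stub with the same semantics (the real implementation lives in mercurial.extensions and does a little more bookkeeping):

def wrapfunction(container, name, wrapper):
    """Replace container.name with a wrapper that receives the original."""
    origfn = getattr(container, name)

    def wrapped(*args, **kwargs):
        return wrapper(origfn, *args, **kwargs)

    setattr(container, name, wrapped)
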
def reposetup(ui, repo):
    if isinstance(repo, sqliterepository):
        repo._db = None

    # TODO check for bundlerepository?

@@ -1,1226 +1,1229 @@ mercurial/hg.py

# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat

from .i18n import _
from .node import (
    nullid,
)

from . import (
    bookmarks,
    bundlerepo,
    cacheutil,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    narrowspec,
    node,
    phases,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])
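
Concretely, parseurl() splits a #branch fragment off the URL. In the doctest style this module already uses for defaultdest() (illustrative only: running these requires Mercurial's util.url, and byte-string reprs are shown Python 2 style as in those doctests):

>>> parseurl(b'https://example.com/repo#stable')
('https://example.com/repo', ('stable', []))
>>> parseurl(b'https://example.com/repo', [b'default'])
('https://example.com/repo', (None, ['default']))
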
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    ui = getattr(obj, "ui", ui)
    if ui.configbool('devel', 'debug.extensions'):
        log = lambda msg, *values: ui.debug('debug.extensions: ',
            msg % values, label='debug.extensions')
    else:
        log = lambda *a, **kw: None
    for f in presetupfuncs or []:
        f(ui, obj)
    log('- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            log(' - running reposetup for %s\n' % (name,))
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                log(' > reposetup for %r took %s\n', name, stats)
    log('> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                       intents=intents, createopts=createopts)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create, intents=intents,
                       createopts=createopts).peer()

def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(ui, dest, create=True, createopts={
        'sharedrepo': srcrepo,
        'sharedrelative': relative,
        'shareditems': shareditems,
    })

    postshare(srcrepo, r, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r

def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    localrepo.poisonrepository(repo)

    return newrepo

def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))

def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
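
For the "remote" share-naming mode used by clone() below, the pooled store's directory name is simply the SHA-1 of the source URL ("identity" mode uses the hash of the repository's root changeset instead). A standalone illustration of the "remote" computation; the pool path is hypothetical, and node.hex(sha1(...).digest()) is equivalent to hexdigest():

import hashlib
import os

source = b'https://example.com/repo'
pool = '/var/cache/hg-pool'  # hypothetical share pool directory

sharepath = os.path.join(pool, hashlib.sha1(source).hexdigest())
print(sharepath)  # /var/cache/hg-pool/<40 hex chars>, stable per source URL
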
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)

def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None,
          storeincludepats=None, storeexcludepats=None, depth=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."

    storeincludepats and storeexcludepats: sets of file patterns to include and
    exclude in the repository copy, respectively. If not defined, all files
    will be included (a "full" clone). Otherwise a "narrow" clone containing
    only the requested files will be performed. If ``storeincludepats`` is not
    defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
    ``path:.``. If both are empty sets, no files will be cloned.
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    createopts = {}
    narrow = False

    if storeincludepats is not None:
        narrowspec.validatepatterns(storeincludepats)
        narrow = True

    if storeexcludepats is not None:
        narrowspec.validatepatterns(storeexcludepats)
        narrow = True

    if narrow:
        # Include everything by default if only exclusion patterns defined.
        if storeexcludepats and not storeincludepats:
            storeincludepats = {'path:.'}

        createopts['narrowfiles'] = True

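    # (Note: a requested depth means a shallow clone; recording the
    # 'shallowfilestore' creation option lets storage backends that support
    # it, such as the sqlitestore extension above, add a shallow-files
    # requirement so the shallowness survives reopening the repository.)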
+   if depth:
+       createopts['shallowfilestore'] = True
+
    if srcpeer.capable(b'lfs-serve'):
        # Repository creation honors the config if it disabled the extension, so
        # we can't just announce that lfs will be enabled. This check avoids
        # saying that lfs will be enabled, and then saying it's an unknown
        # feature. The lfs creation option is set in either case so that a
        # requirement is added. If the extension is explicitly disabled but the
        # requirement is set, the clone aborts early, before transferring any
        # data.
        createopts['lfs'] = True

        if extensions.disabledext('lfs'):
            ui.status(_('(remote is using large file support (lfs), but it is '
                        'explicitly disabled in the local configuration)\n'))
        else:
            ui.status(_('(remote is using large file support (lfs); lfs will '
                        'be enabled for this repository)\n'))

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand('lookup', {
                        'key': '0',
                    }).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            ui.status(_('(pooled storage not supported for narrow clones)\n'))
            sharepath = None

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=revs, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        # TODO this is a somewhat arbitrary restriction.
        if narrow:
            copy = False

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                util.makedirs(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
                                createopts=createopts)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(e.callcommand('lookup', {
                            'key': rev,
                        }).result())
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                if narrow:
                    with local.lock():
                        local.setnarrowpats(storeincludepats, storeexcludepats)

                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream,
                                  includepats=storeincludepats,
                                  excludepats=storeexcludepats,
                                  depth=depth)
            elif srcrepo:
                # TODO lift restriction once exchange.push() accepts narrow
                # push.
                if narrow:
                    raise error.Abort(_('narrow clone not available for '
                                        'remote destinations'))

                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(
                    _("clone from remote to remote not supported"))

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand('lookup', {
                            'key': update,
                        }).result()

                uprev = None
                status = None
                if checkout is not None:
                    # Some extensions (at least hg-git and hg-subversion) have
                    # a peer.lookup() implementation that returns a name instead
                    # of a nodeid. We work around it here until we've figured
                    # out a better solution.
                    if len(checkout) == 20 and checkout in destrepo:
                        uprev = checkout
                    elif scmutil.isrevsymbol(destrepo, checkout):
                        uprev = scmutil.revsymbol(destrepo, checkout).node()
                else:
                    if update is not True:
                        try:
                            uprev = destrepo.lookup(update)
                        except error.RepoLookupError:
                            pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
825 status = _("updating to branch %s\n") % bn
823 destrepo.ui.status(status)
826 destrepo.ui.status(status)
824 _update(destrepo, uprev)
827 _update(destrepo, uprev)
825 if update in destrepo._bookmarks:
828 if update in destrepo._bookmarks:
826 bookmarks.activate(destrepo, update)
829 bookmarks.activate(destrepo, update)
827 finally:
830 finally:
828 release(srclock, destlock)
831 release(srclock, destlock)
829 if cleandir is not None:
832 if cleandir is not None:
830 shutil.rmtree(cleandir, True)
833 shutil.rmtree(cleandir, True)
831 if srcpeer is not None:
834 if srcpeer is not None:
832 srcpeer.close()
835 srcpeer.close()
833 return srcpeer, destpeer
836 return srcpeer, destpeer
834
837
def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % (
                   stats.updatedcount, stats.mergedcount,
                   stats.removedcount, stats.unresolvedcount))

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered; otherwise they are merged
    across the update.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, branchmerge=False, force=overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0

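# A minimal usage sketch of update(): the boolean result indicates whether
# unresolved conflicts remain. The revision symbol 'tip' is an assumption
# chosen for illustration; any symbol the repository accepts works.
def _exampleupdateusage(repo):
    hadconflicts = update(repo, 'tip')
    if hadconflicts:
        repo.ui.status(_("example: conflicts remain after update\n"))
    return hadconflicts
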
# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0

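# A minimal usage sketch contrasting clean() with update() above: clean()
# clobbers working-directory changes instead of merging them. ``node`` is
# assumed to be any valid changeset identifier.
def _examplecleanusage(repo, node):
    return clean(repo, node, show_stats=False)
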
# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

    * abort: abort if the working directory is dirty
    * none: don't check (merge working directory changes into destination)
    * linear: check that update is linear before merging working directory
      changes into destination
    * noconflict: check that the update does not result in file merges

    This returns whether a conflict was detected during the update.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret

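# A minimal usage sketch of updatetotally() with an explicit updatecheck
# mode; 'noconflict' is one of the valid values listed in the docstring
# above. brev=None means no bookmark is (de)activated afterwards.
def _exampleupdatetotally(ui, repo, checkout):
    return updatetotally(ui, repo, checkout, None, updatecheck='noconflict')
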
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return True if any
    conflicts remain unresolved."""
    if not abort:
        stats = mergemod.update(repo, node, branchmerge=True, force=force,
                                mergeforce=mergeforce, labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0

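# A minimal usage sketch of merge(): perform a normal (non-abort) branch
# merge and report leftover conflicts. The labels here are an assumption
# for illustration, mirroring the style used by updaterepo() above.
def _examplemergeusage(repo, node):
    return merge(repo, node, force=False,
                 labels=['working copy', 'merge rev'])
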
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

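# A minimal usage sketch of outgoing(): dest=None makes _outgoing() fall
# back to the configured 'default-push'/'default' path, and an empty opts
# dict relies on the .get() defaults used above.
def _exampleoutgoingusage(ui, repo):
    return outgoing(ui, repo, None, {})
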
def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

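# A minimal usage sketch of verify(): a non-zero return signals problems,
# matching the exit status behavior of the 'hg verify' command.
def _exampleverifyusage(repo):
    ret = verify(repo)
    if ret:
        repo.ui.warn(_("example: verification found problems\n"))
    return ret
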
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

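# A minimal usage sketch of remoteui(): override the ssh command used for
# remote connections. 'ssh' is one of the two option keys copied above;
# the command string itself is an assumption for illustration.
def _exampleremoteuiusage(repo):
    return remoteui(repo, {'ssh': 'ssh -C'})
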
# Files of interest
# Used to check if the repository has changed, by looking at the mtime and
# size of these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

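# A sketch of the fingerprinting idea behind ``foi``: an (mtime, size)
# pair per file is a cheap, if imprecise, change signal. This relies on
# the module-level os/stat imports also used by _repostate() below.
def _examplefoifingerprint(path):
    st = os.stat(path)
    return (st[stat.ST_MTIME], st.st_size)
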
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
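
# A minimal usage sketch of cachedlocalrepo: cache a repo object and let
# fetch() transparently rebuild it when the fingerprint changes.
def _examplecachedrepousage(repo):
    cached = cachedlocalrepo(repo)
    # ``created`` is True when a new repo instance had to be made.
    repo, created = cached.fetch()
    return repo, created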
@@ -1,3040 +1,3044 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered.
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

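# A minimal usage sketch of isfilecached(): probe the filecache without
# populating it. The property name 'changelog' is an assumption for
# illustration.
def _exampleisfilecachedusage(repo):
    obj, cached = isfilecached(repo, 'changelog')
    return obj if cached else None
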
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

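# A minimal usage sketch of localcommandexecutor: a direct peer method
# call is wrapped in a resolved future, matching the executor interface
# used by remote peers. This mirrors what localpeer.commandexecutor()
# below returns.
def _exampleexecutorusage(peer, rev):
    with localcommandexecutor(peer) as e:
        f = e.callcommand('lookup', {'key': rev})
        return f.result()
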
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

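# A minimal sketch of how an extension could hook into featuresetupfuncs
# to declare support for an extra requirement; the requirement name below
# is an assumption for illustration.
def _examplefeaturesetup(ui, supported):
    supported.add('exp-example-feature')
# At load time an extension would register it with:
#     featuresetupfuncs.add(_examplefeaturesetup)
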
381 def makelocalrepository(baseui, path, intents=None):
381 def makelocalrepository(baseui, path, intents=None):
382 """Create a local repository object.
382 """Create a local repository object.
383
383
384 Given arguments needed to construct a local repository, this function
384 Given arguments needed to construct a local repository, this function
385 performs various early repository loading functionality (such as
385 performs various early repository loading functionality (such as
386 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
386 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
387 the repository can be opened, derives a type suitable for representing
387 the repository can be opened, derives a type suitable for representing
388 that repository, and returns an instance of it.
388 that repository, and returns an instance of it.
389
389
390 The returned object conforms to the ``repository.completelocalrepository``
390 The returned object conforms to the ``repository.completelocalrepository``
391 interface.
391 interface.
392
392
393 The repository type is derived by calling a series of factory functions
393 The repository type is derived by calling a series of factory functions
394 for each aspect/interface of the final repository. These are defined by
394 for each aspect/interface of the final repository. These are defined by
395 ``REPO_INTERFACES``.
395 ``REPO_INTERFACES``.
396
396
397 Each factory function is called to produce a type implementing a specific
397 Each factory function is called to produce a type implementing a specific
398 interface. The cumulative list of returned types will be combined into a
398 interface. The cumulative list of returned types will be combined into a
399 new type and that type will be instantiated to represent the local
399 new type and that type will be instantiated to represent the local
400 repository.
400 repository.
401
401
402 The factory functions each receive various state that may be consulted
402 The factory functions each receive various state that may be consulted
403 as part of deriving a type.
403 as part of deriving a type.
404
404
405 Extensions should wrap these factory functions to customize repository type
405 Extensions should wrap these factory functions to customize repository type
406 creation. Note that an extension's wrapped function may be called even if
406 creation. Note that an extension's wrapped function may be called even if
407 that extension is not loaded for the repo being constructed. Extensions
407 that extension is not loaded for the repo being constructed. Extensions
408 should check if their ``__name__`` appears in the
408 should check if their ``__name__`` appears in the
409 ``extensionmodulenames`` set passed to the factory function and no-op if
409 ``extensionmodulenames`` set passed to the factory function and no-op if
410 not.
410 not.
411 """
411 """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        # Run this before extensions.loadall() so extensions can be
        # automatically enabled.
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        features=features,
        intents=intents)

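# A minimal usage sketch (the path is hypothetical): this is the lowest-level
# entry point; most callers should prefer hg.repository() or instance(),
# which layer extension integration on top.
def _exampleopenrepo(baseui):
    repo = makelocalrepository(baseui, b'/path/to/repo')
    # The derived type name encodes the root and requirements, e.g.
    # derivedrepo:/path/to/repo<dotencode,fncache,store,...>
    return repo.requirements
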
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

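# Sketch of how an extension might hook this step, assuming it is installed
# with extensions.wrapfunction(localrepo, 'afterhgrcload', ...). The
# requirement and extension names below are hypothetical.
def _exampleafterhgrcload(orig, ui, wdirvfs, hgvfs, requirements):
    orig(ui, wdirvfs, hgvfs, requirements)
    # Auto-enable a companion extension when our requirement is present.
    if b'exp-examplefeature' in requirements:
        if not ui.hasconfig(b'extensions', b'examplefeature'):
            ui.setconfig(b'extensions', b'examplefeature', b'',
                         source='autoload')
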
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

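# A small sketch of what the function returns: the base set plus anything
# contributed by loaded extensions and by compression engines that define a
# revlog header (for example, a revlog-capable zstd engine would derive
# b'exp-compression-zstd').
def _examplesupported(ui):
    supported = gathersupportedrequirements(ui)
    return b'revlogv1' in supported  # always present via the base set
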
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize. Returns nothing on success.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

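# Sketch of the failure mode: any requirement outside the supported set
# aborts repository opening with a pointer to the wiki. The requirement
# name below is hypothetical.
def _examplerecognized():
    try:
        ensurerequirementsrecognized({b'from-the-future'}, {b'revlogv1'})
    except error.RequirementError:
        return True  # raised because b'from-the-future' is unknown
    return False
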
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

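# Sketch of the monkeypatching the docstring invites, assuming installation
# via extensions.wrapfunction(localrepo, 'ensurerequirementscompatible', ...).
# The extra check and the requirement name are hypothetical.
def _examplecompatible(orig, ui, requirements):
    orig(ui, requirements)
    if (b'exp-examplefeature' in requirements
            and b'exp-sparse' in requirements):
        raise error.RepoError(_(b'examplefeature cannot be used in a '
                                b'sparse repository'))
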
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

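# Sketch of the selection logic above: repositories created by a modern
# ``hg init`` carry 'store', 'fncache', and 'dotencode', so they get a
# fncachestore; 'store' alone yields an encodedstore; neither yields a
# basicstore writing directly under the base path.
def _examplemakestore(path):
    requirements = {b'store', b'fncache', b'dotencode'}
    return makestore(requirements, path,
                     lambda base: vfsmod.vfs(base, cacheaudited=True))
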
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

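# Sketch: for a typical modern repository the resolved options include at
# least the revlog flags derived from requirements; config knobs such as
# format.chunkcachesize appear only when explicitly set.
def _exampleoptions(ui):
    requirements = {b'revlogv1', b'generaldelta', b'store', b'fncache'}
    options = resolvestorevfsoptions(ui, requirements, set())
    assert options[b'revlogv1'] and options[b'generaldelta']
    return options
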
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

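# Sketch: the factory picks the narrow-aware storage type only when the
# narrow requirement is present; both types also advertise revlog file
# storage and stream clone support via ``features``.
def _examplefilestorage():
    features = set()
    typ = makefilestorage(requirements={repository.NARROW_REQUIREMENT},
                          features=features)
    return typ is revlognarrowfilestorage  # True
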
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]

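# Sketch of why the lambdas matter: the factory is resolved at call time,
# so replacing the module-level function is seen by makelocalrepository().
# An extension would install a wrapper with
#     extensions.wrapfunction(localrepo, 'makefilestorage', ...)
# 'exampleext' below is a hypothetical extension module name.
def _examplewrappedfilestorage(orig, requirements, features, **kwargs):
    # Wrapped factories run even for repos that did not enable this
    # extension, so no-op unless our module was loaded for this repo.
    if 'exampleext' not in kwargs['extensionmodulenames']:
        return orig(requirements=requirements, features=features, **kwargs)
    # Here an extension could return its own storage type instead.
    return orig(requirements=requirements, features=features, **kwargs)
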
@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files that can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` defining the path to the storage base directory. Points
           to a ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

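    # Example (sketch): ``name`` is one of repoview's registered filters,
    # e.g.
    #     repo.filtered('visible')   # hide hidden (e.g. obsolete) changesets
    #     repo.filtered('served')    # what is exposed to remote peers
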
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match._root, match._cwd, match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)

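    # Illustrative sketch (assumption, not part of the original source):
    # __getitem__ accepts several changeid forms, e.g.:
    #
    #   repo[0]          # integer revision number
    #   repo['tip']      # the symbolic 'tip' name
    #   repo[node]       # a 20-byte binary node
    #   repo[hexnode]    # a 40-character hex node
    #   repo[None]       # the working directory context
    #
    # where ``node``/``hexnode`` are hypothetical variables holding a binary
    # and a hex changeset id respectively.
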
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

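    # Illustrative sketch (assumption, not part of the original source): the
    # %-formatting escapes values safely into the revset language, e.g.:
    #
    #   repo.revs('ancestors(%d)', 42)       # %d: an integer revision
    #   repo.revs('branch(%s)', 'default')   # %s: a string value
    #   for rev in repo.revs('head() and not closed()'):
    #       ...  # rev is an integer
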
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

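    # Illustrative sketch (assumption, not part of the original source):
    #
    #   revs = repo.anyrevs(['heads(default)', 'mine()'], user=True,
    #                       localalias={'mine': 'author("alice")'})
    #
    # matches revisions satisfying either spec; the local 'mine' alias
    # overrides any same-named alias from the user's configuration.
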
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

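    # Illustrative sketch (assumption, not part of the original source): the
    # patterns loaded above come from [encode]/[decode] hgrc sections, e.g.:
    #
    #   [encode]
    #   *.txt = dos2unix    # filter data read from the working directory
    #   [decode]
    #   *.txt = unix2dos    # filter data written to the working directory
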
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
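        #
        # Illustrative sketch (assumption, not part of the original source):
        # moving tag 'v1.0' from one node to another would produce two lines:
        #
        #   -M 6d2e0c... v1.0
        #   +M a9b1f4... v1.0
        #
        # where the nodes are hypothetical 40-character changeset hashes,
        # truncated here for readability.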
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the
                # transaction closes if tr.addfilegenerator (via
                # dirstate.write or so) wasn't invoked while the
                # transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

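    # Illustrative sketch (assumption, not part of the original source):
    # callers open a transaction under the repo lock and close or abort it:
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...  # mutate the store
    #           tr.close()    # runs pretxnclose/txnclose hooks
    #       finally:
    #           tr.release()  # aborts (txnabort hook) if not closed
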
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

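    # For example, a full cache warm-up (the eager path used by debug
    # tooling) might be requested like this -- a sketch, assuming `repo` is
    # an existing local repository object:
    #
    #     with repo.wlock(), repo.lock():
    #         repo.updatecaches(full=True)
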
    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

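    # Illustrative sketch: defer work until every lock is released (the
    # callback runs immediately if nothing is locked). `notifyexternal` is
    # a hypothetical helper:
    #
    #     def callback():
    #         notifyexternal(repo)
    #     repo._afterlock(callback)
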
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

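    # The documented acquisition order, sketched for callers that need both
    # locks (acquire 'wlock' before 'lock' and release in reverse):
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         ...  # modify store and working copy
    #     finally:
    #         lockmod.release(lock, wlock)
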
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (e.g., issue4476). Instead, we
            # will warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook
            # runs, so skip it if the node is gone
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

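    # Illustrative caller sketch (message and user are made up); commit()
    # takes its own locks and returns None when there is nothing to commit:
    #
    #     node = repo.commit(text='example: automated commit',
    #                        user='Example <example@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
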
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

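    # Illustrative sketch: tools that synthesize revisions (convert-style
    # workflows) can feed an in-memory context to commitctx(). All literals
    # below are made up:
    #
    #     def getfilectx(repo, memctx, path):
    #         return context.memfilectx(repo, memctx, path, 'content\n')
    #
    #     mctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                           'example message', ['a.txt'], getfilectx,
    #                           user='example', date='0 0')
    #     node = repo.commitctx(mctx)
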
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

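    # Illustrative sketch (hypothetical extension code): run a fixup with the
    # wlock held each time a dirstate-backed status completes:
    #
    #     def poststatus(wctx, status):
    #         wctx.repo().ui.debug('post-status fixups ran\n')
    #     repo.addpostdsstatus(poststatus)
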
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

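    # Illustrative usage sketch: list the open heads of the 'default' branch,
    # newest first:
    #
    #     for node in repo.branchheads('default'):
    #         repo.ui.write('%s\n' % hex(node))
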
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        """return lists of nodes between each (top, bottom) pair, sampled
        at exponentially growing distances along the first-parent chain"""
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

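    # Worked example: for a linear history 0..10 with top at rev 10 and
    # bottom at rev 0, the sampled nodes sit 1, 2, 4 and 8 steps from top
    # (revs 9, 8, 6 and 2) -- the spacing doubles at each sample:
    #
    #     samples = repo.between([(repo[10].node(), repo[0].node())])[0]
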
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose functions are called with a
        pushop (carrying repo, remote, and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

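    # Illustrative sketch: the wire protocol moves a bookmark through this
    # method; the values are hex nodes and the bookmark name here is made up:
    #
    #     ok = repo.pushkey('bookmarks', 'feature-bookmark', oldhex, newhex)
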
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename can't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    return requirements

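# Editor's sketch (not part of this changeset): the docstring above invites
# extensions to wrap newreporequirements() to add custom requirements. A
# minimal, hypothetical wrapper ('myext' createopt and 'exp-myext-storage'
# requirement are invented names):

from mercurial import extensions, localrepo

def _newreporequirements(orig, ui, createopts):
    # Compute the stock requirements, then layer the custom one on top.
    requirements = orig(ui, createopts)
    if createopts.get('myext'):
        requirements.add('exp-myext-storage')
    return requirements

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)
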
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

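# Editor's sketch (not part of this changeset): per the docstring above, an
# extension wraps filterknowncreateopts() and removes the creation options
# it knows how to handle from the "unknown" dict. 'myext' is hypothetical.

from mercurial import extensions, localrepo

def _filterknowncreateopts(orig, ui, createopts):
    unknown = orig(ui, createopts)
    # We handle 'myext', so it must not be reported as unknown.
    unknown.pop('myext', None)
    return unknown

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'filterknowncreateopts',
                            _filterknowncreateopts)
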
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

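# Editor's sketch (not part of this changeset): creating a repository whose
# file storage is marked shallow via the createopts key documented above,
# then opening it, mirroring what instance() does. Whether the storage
# backend actually honors shallowness depends on the backend; the target
# path is hypothetical.

from mercurial import localrepo, ui as uimod

def makeshallowrepo(path):
    ui = uimod.ui.load()
    localrepo.createrepository(ui, path,
                               createopts={'shallowfilestore': True})
    return localrepo.makelocalrepository(ui, path)
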
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
@@ -1,1851 +1,1853 b''
# repository.py - Interfaces and base classes for repositories and peers.
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from . import (
    error,
)
from .utils import (
    interfaceutil,
)

# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = 'narrowhg-experimental'

# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'
# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)

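# Editor's sketch (not part of this changeset): consumers are expected to
# test feature strings against a repository's ``features`` set. Assuming
# ``repo`` is a localrepository instance exposing that set:

def supportsfullfilehistory(repo):
    # Shallow file storage may lack ancestor revisions, so operations that
    # need complete file history should check this before proceeding.
    return REPO_FEATURE_SHALLOW_FILE_STORAGE not in repo.features
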
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """

class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """

class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating of the corresponding node
        at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """

class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass

class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """

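# Editor's sketch (not part of this changeset): canonical executor usage.
# All commands for one round trip are requested before any result() call,
# per the callcommand() contract above.

def headsandbranchmap(peer):
    with peer.commandexecutor() as e:
        fheads = e.callcommand('heads', {})
        fbranchmap = e.callcommand('branchmap', {})
        # Both requests are issued; resolving the futures may trigger the
        # actual send on transports that buffer commands.
        return fheads.result(), fbranchmap.result()
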
class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """

class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """

class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified peer interface for wire protocol version 2 peers."""

    apidescriptor = interfaceutil.Attribute(
        """Data structure holding description of server API.""")

@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = '%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name):]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _('cannot %s; remote repository does not support the %r '
              'capability') % (purpose, name))

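# Editor's sketch (not part of this changeset): how callers typically use
# the capability methods implemented above.

def ensurebundle2(peer):
    # requirecap() raises error.CapabilityError when the peer lacks the
    # capability; capable() returns True/False or a string payload for
    # non-boolean capabilities.
    peer.requirecap('bundle2', 'exchange repository data')
    return peer.capable('bundle2')
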
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """
    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem.""")

    error = interfaceutil.Attribute(
        """Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """)

class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute(
        """20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to.""")

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.

        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
        """)

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """)

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """)

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node.""")

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """)

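# Editor's sketch (not part of this changeset): resolving an irevisiondelta
# to a fulltext. ``revision`` and ``delta`` are mutually exclusive, so
# consumers branch on which is populated. ``basetexts`` is a hypothetical
# cache mapping node -> previously resolved fulltext.

from mercurial import mdiff

def resolvefulltext(rdelta, basetexts):
    if rdelta.revision is not None:
        return rdelta.revision
    # Apply the bdiff-format delta against the base revision's fulltext.
    return mdiff.patch(basetexts[rdelta.basenode], rdelta.delta)
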
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where first 6 bytes are the offset and the next 2 bytes
           are flags. The offset can be 0 if it is not used by the store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of changelog revision this entry is related to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 1st parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""

class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """
    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

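# Editor's sketch (not part of this changeset): walking the index portion of
# a file store. repo.file() returns an object implementing ifileindex (and
# the other file storage interfaces) for a tracked path.

def dumpfiledag(repo, path):
    fl = repo.file(path)
    for rev in fl.revs():
        node = fl.node(rev)
        p1, p2 = fl.parents(node)
        # linkrev ties this file revision back to a changelog revision.
        yield rev, node, p1, p2, fl.linkrev(rev)
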
564 class ifiledata(interfaceutil.Interface):
566 class ifiledata(interfaceutil.Interface):
565 """Storage interface for data storage of a specific file.
567 """Storage interface for data storage of a specific file.
566
568
567 This complements ``ifileindex`` and provides an interface for accessing
569 This complements ``ifileindex`` and provides an interface for accessing
568 data for a tracked file.
570 data for a tracked file.
569 """
571 """
570 def size(rev):
572 def size(rev):
571 """Obtain the fulltext size of file data.
573 """Obtain the fulltext size of file data.
572
574
573 Any metadata is excluded from size measurements.
575 Any metadata is excluded from size measurements.
574 """
576 """
575
577
576 def revision(node, raw=False):
578 def revision(node, raw=False):
577 """"Obtain fulltext data for a node.
579 """"Obtain fulltext data for a node.
578
580
579 By default, any storage transformations are applied before the data
581 By default, any storage transformations are applied before the data
580 is returned. If ``raw`` is True, non-raw storage transformations
582 is returned. If ``raw`` is True, non-raw storage transformations
581 are not applied.
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(nodes,
                      nodesorder=None,
                      revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltaprevious=False):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and
        behavior of this mode is storage-dependent.) ``nodes`` ordering can
        force revisions to be emitted before their ancestors, so consumers
        should use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if
        needed.

        If ``deltaprevious`` is True and revision data is requested, all
        revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against
        its 1st parent.
        """

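# Illustrative sketch (hypothetical helper, not part of the interface
# definitions): one way a consumer might drive ``emitrevisions()`` to gather
# index data. ``store`` stands for any object conforming to ``ifiledata``.
def _collectindexdata(store, nodes):
    """Return (node, p1node, p2node) tuples for ``nodes``.

    Because implementations must emit parents before descendants, the
    result is in DAG topological order.
    """
    entries = []
    for rev in store.emitrevisions(nodes, revisiondata=False):
        # Only index attributes are guaranteed here; ``rev.revision`` and
        # ``rev.delta`` may be None because revisiondata is False.
        entries.append((rev.node, rev.p1node, rev.p2node))
    return entries
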
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
                 maybemissingparents=False):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the
        list even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """

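# Illustrative sketch (hypothetical helper): building one of the 7-tuples
# consumed by ``addgroup()``. Using ``nullid`` as ``deltabase`` lets the
# delta field carry the fulltext directly, per the docstring above.
def _fulltextdeltaentry(node, p1, p2, linknode, fulltext):
    from mercurial.node import nullid
    # (node, p1, p2, linknode, deltabase, delta, flags); flags=0 requests no
    # REVISION_FLAG_* processing.
    return (node, p1, p2, linknode, nullid, fulltext, 0)
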
class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are
        bools indicating whether to calculate and obtain that data.

        exclusivefiles
           Iterable of (vfs, path) describing files that are exclusively
           used to back storage for this tracked path.

        sharedfiles
           Iterable of (vfs, path) describing files that are used to back
           storage for this tracked path. Those files may also provide
           storage for other stored entities.

        revisionscount
           Number of revisions available for retrieval.

        trackedsize
           Total size in bytes of all tracked revisions. This is a sum of the
           length of the fulltext of all revisions.

        storedsize
           Total size in bytes used to store data for all tracked revisions.
           This is commonly less than ``trackedsize`` due to internal usage
           of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a
        reasonable value to use. In that case, the value should be set to
        ``None`` and callers are expected to handle this special value.
        """

    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """

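# Illustrative sketch (hypothetical helper): querying ``storageinfo()`` and
# coping with backends that report ``None`` for unsupported queries.
def _storagesummary(filestore):
    info = filestore.storageinfo(revisionscount=True, trackedsize=True,
                                 storedsize=True)
    tracked = info['trackedsize']
    stored = info['storedsize']
    if tracked and stored:
        # Delta storage usually makes storedsize smaller than trackedsize.
        ratio = stored / float(tracked)
    else:
        # One of the queries was unsupported (None) or the store is empty.
        ratio = None
    return info['revisionscount'], tracked, stored, ratio
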
class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""

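# Illustrative sketch (not Mercurial's implementation): a minimal
# reference-counted structure satisfying ``idirs``.
class _simpledirs(object):
    def __init__(self):
        self._dirs = {}  # directory -> number of paths beneath it

    def _parents(self, path):
        # b'a/b/c' contributes the directories b'a/b', b'a' and b''.
        parts = path.split(b'/')[:-1]
        for i in range(len(parts), -1, -1):
            yield b'/'.join(parts[:i])

    def addpath(self, path):
        for d in self._parents(path):
            self._dirs[d] = self._dirs.get(d, 0) + 1

    def delpath(self, path):
        for d in self._parents(path):
            if self._dirs[d] == 1:
                del self._dirs[d]
            else:
                self._dirs[d] -= 1

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, path):
        return path in self._dirs
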
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def matches(match):
        """Generate a new manifest filtered through a matcher.

        Returns an object conforming to the ``imanifestdict`` interface.
        """

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        This is equivalent to ``self.matches(match).iterkeys()`` except a new
        manifest object is not created.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples
        of the form ``((node1, flag1), (node2, flag2))`` where
        ``(node1, flag1)`` represents the node and flags for this manifest
        and ``(node2, flag2)`` are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path, default=''):
        """Return the flags value for a path or a default value if missing."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.
        """

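# Illustrative sketch (hypothetical helper): interpreting ``diff()`` output.
# It assumes the revlog-based convention that a path missing on one side is
# reported as ``(None, '')`` for that side.
def _classifydiff(m1, m2):
    added, removed, modified = [], [], []
    for path, ((n1, fl1), (n2, fl2)) in m1.diff(m2).items():
        if n1 is None:
            added.append(path)     # present only in m2
        elif n2 is None:
            removed.append(path)   # present only in m1
        else:
            modified.append(path)  # node and/or flags differ
    return added, removed, modified
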
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def new():
        """Obtain a new manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """

class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest
        representing those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """

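# Illustrative sketch (hypothetical helper): ``readfast()`` may return either
# the full manifest or just the changes against the first parent, whichever
# is cheaper, so it suits consumers that can tolerate a superset of the
# changed paths.
def _candidatepaths(mrev):
    for path in mrev.readfast().iterkeys():
        yield path
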
class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed,
              match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to
        provide the full manifest in the future for any directories written
        (these manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """

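# Illustrative sketch (hypothetical flow; names are stand-ins for caller
# state): mutating a writable manifest revision and committing it.
def _commitmanifest(mctx, tr, linkrev, p1node, p2node, added, removed):
    m = mctx.read()
    for path, node in added:
        m[path] = node  # existing flags are carried over per __setitem__
    for path in removed:
        if path in m:
            del m[path]
    return mctx.write(tr, linkrev, p1node, p2node,
                      [p for p, _n in added], removed)
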
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """)

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """)

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """)

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """)

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """)

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(nodes,
                      nodesorder=None,
                      revisiondata=False,
                      assumehaveparentrevisions=False):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.revision(node, raw=True))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the
        interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better
        API for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and
        should not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not
        all paths must be inspected; this is an optimization and can be
        safely ignored. Note that the storage must still be able to
        reproduce a full manifest including files that did not match.
        """

    def storageinfo(exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """

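# Illustrative sketch (assumes a store with at least one revision): the value
# kinds ``lookup()`` is documented to accept all resolve to the same node.
def _lookupexamples(store):
    from mercurial.node import hex
    node = store.node(0)                     # revision number -> binary node
    assert store.lookup(0) == node           # integer revision number
    assert store.lookup(node) == node        # 20-byte binary node
    assert store.lookup(hex(node)) == node   # 40-byte hex node
    assert store.lookup(b'0') == node        # bytes convertible to an int
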
class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will
        be the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest
        will be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

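# Illustrative sketch (hypothetical helper): reading a directory manifest via
# ``imanifestlog``. Note ``node`` is always the *root* manifest node, even
# when asking for a subdirectory tree.
def _readtree(mlog, node, tree=b''):
    mrev = mlog.get(tree, node)  # tree=b'' yields the root manifest
    return mrev.read()           # an ``imanifestdict``
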
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """

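# Illustrative sketch (hypothetical helper): walking a tracked file's history
# through the ``ifilestorage`` object returned by ``file()``.
def _filehistory(repo, path):
    fl = repo.file(path)
    for rev in fl:
        node = fl.node(rev)
        # ``renamed()`` returns (sourcepath, sourcenode) or False.
        yield node, fl.linkrev(rev), fl.renamed(node)
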
1329 class ilocalrepositorymain(interfaceutil.Interface):
1331 class ilocalrepositorymain(interfaceutil.Interface):
1330 """Main interface for local repositories.
1332 """Main interface for local repositories.
1331
1333
1332 This currently captures the reality of things - not how things should be.
1334 This currently captures the reality of things - not how things should be.
1333 """
1335 """
1334
1336
1335 supportedformats = interfaceutil.Attribute(
1337 supportedformats = interfaceutil.Attribute(
1336 """Set of requirements that apply to stream clone.
1338 """Set of requirements that apply to stream clone.
1337
1339
1338 This is actually a class attribute and is shared among all instances.
1340 This is actually a class attribute and is shared among all instances.
1339 """)
1341 """)
1340
1342
1341 supported = interfaceutil.Attribute(
1343 supported = interfaceutil.Attribute(
1342 """Set of requirements that this repo is capable of opening.""")
1344 """Set of requirements that this repo is capable of opening.""")
1343
1345
1344 requirements = interfaceutil.Attribute(
1346 requirements = interfaceutil.Attribute(
1345 """Set of requirements this repo uses.""")
1347 """Set of requirements this repo uses.""")
1346
1348
1347 features = interfaceutil.Attribute(
1349 features = interfaceutil.Attribute(
1348 """Set of "features" this repository supports.
1350 """Set of "features" this repository supports.
1349
1351
1350 A "feature" is a loosely-defined term. It can refer to a feature
1352 A "feature" is a loosely-defined term. It can refer to a feature
1351 in the classical sense or can describe an implementation detail
1353 in the classical sense or can describe an implementation detail
1352 of the repository. For example, a ``readonly`` feature may denote
1354 of the repository. For example, a ``readonly`` feature may denote
1353 the repository as read-only. Or a ``revlogfilestore`` feature may
1355 the repository as read-only. Or a ``revlogfilestore`` feature may
1354 denote that the repository is using revlogs for file storage.
1356 denote that the repository is using revlogs for file storage.
1355
1357
1356 The intent of features is to provide a machine-queryable mechanism
1358 The intent of features is to provide a machine-queryable mechanism
1357 for repo consumers to test for various repository characteristics.
1359 for repo consumers to test for various repository characteristics.
1358
1360
1359 Features are similar to ``requirements``. The main difference is that
1361 Features are similar to ``requirements``. The main difference is that
1360 requirements are stored on-disk and represent requirements to open the
1362 requirements are stored on-disk and represent requirements to open the
1361 repository. Features are more run-time capabilities of the repository
1363 repository. Features are more run-time capabilities of the repository
1362 and more granular capabilities (which may be derived from requirements).
1364 and more granular capabilities (which may be derived from requirements).
1363 """)
1365 """)
1364
1366
1365 filtername = interfaceutil.Attribute(
1367 filtername = interfaceutil.Attribute(
1366 """Name of the repoview that is active on this repo.""")
1368 """Name of the repoview that is active on this repo.""")
1367
1369
1368 wvfs = interfaceutil.Attribute(
1370 wvfs = interfaceutil.Attribute(
1369 """VFS used to access the working directory.""")
1371 """VFS used to access the working directory.""")
1370
1372
1371 vfs = interfaceutil.Attribute(
1373 vfs = interfaceutil.Attribute(
1372 """VFS rooted at the .hg directory.
1374 """VFS rooted at the .hg directory.
1373
1375
1374 Used to access repository data not in the store.
1376 Used to access repository data not in the store.
1375 """)
1377 """)
1376
1378
1377 svfs = interfaceutil.Attribute(
1379 svfs = interfaceutil.Attribute(
1378 """VFS rooted at the store.
1380 """VFS rooted at the store.
1379
1381
1380 Used to access repository data in the store. Typically .hg/store.
1382 Used to access repository data in the store. Typically .hg/store.
1381 But can point elsewhere if the store is shared.
1383 But can point elsewhere if the store is shared.
1382 """)
1384 """)
1383
1385
1384 root = interfaceutil.Attribute(
1386 root = interfaceutil.Attribute(
1385 """Path to the root of the working directory.""")
1387 """Path to the root of the working directory.""")
1386
1388
1387 path = interfaceutil.Attribute(
1389 path = interfaceutil.Attribute(
1388 """Path to the .hg directory.""")
1390 """Path to the .hg directory.""")
1389
1391
1390 origroot = interfaceutil.Attribute(
1392 origroot = interfaceutil.Attribute(
1391 """The filesystem path that was used to construct the repo.""")
1393 """The filesystem path that was used to construct the repo.""")
1392
1394
1393 auditor = interfaceutil.Attribute(
1395 auditor = interfaceutil.Attribute(
1394 """A pathauditor for the working directory.
1396 """A pathauditor for the working directory.
1395
1397
1396 This checks if a path refers to a nested repository.
1398 This checks if a path refers to a nested repository.
1397
1399
1398 Operates on the filesystem.
1400 Operates on the filesystem.
1399 """)
1401 """)
1400
1402
1401 nofsauditor = interfaceutil.Attribute(
1403 nofsauditor = interfaceutil.Attribute(
1402 """A pathauditor for the working directory.
1404 """A pathauditor for the working directory.
1403
1405
1404 This is like ``auditor`` except it doesn't do filesystem checks.
1406 This is like ``auditor`` except it doesn't do filesystem checks.
1405 """)
1407 """)
1406
1408
1407 baseui = interfaceutil.Attribute(
1409 baseui = interfaceutil.Attribute(
1408 """Original ui instance passed into constructor.""")
1410 """Original ui instance passed into constructor.""")
1409
1411
1410 ui = interfaceutil.Attribute(
1412 ui = interfaceutil.Attribute(
1411 """Main ui instance for this instance.""")
1413 """Main ui instance for this instance.""")
1412
1414
1413 sharedpath = interfaceutil.Attribute(
1415 sharedpath = interfaceutil.Attribute(
1414 """Path to the .hg directory of the repo this repo was shared from.""")
1416 """Path to the .hg directory of the repo this repo was shared from.""")
1415
1417
1416 store = interfaceutil.Attribute(
1418 store = interfaceutil.Attribute(
1417 """A store instance.""")
1419 """A store instance.""")
1418
1420
1419 spath = interfaceutil.Attribute(
1421 spath = interfaceutil.Attribute(
1420 """Path to the store.""")
1422 """Path to the store.""")
1421
1423
1422 sjoin = interfaceutil.Attribute(
1424 sjoin = interfaceutil.Attribute(
1423 """Alias to self.store.join.""")
1425 """Alias to self.store.join.""")
1424
1426
1425 cachevfs = interfaceutil.Attribute(
1427 cachevfs = interfaceutil.Attribute(
1426 """A VFS used to access the cache directory.
1428 """A VFS used to access the cache directory.
1427
1429
1428 Typically .hg/cache.
1430 Typically .hg/cache.
1429 """)
1431 """)
1430
1432
1431 filteredrevcache = interfaceutil.Attribute(
1433 filteredrevcache = interfaceutil.Attribute(
1432 """Holds sets of revisions to be filtered.""")
1434 """Holds sets of revisions to be filtered.""")
1433
1435
1434 names = interfaceutil.Attribute(
1436 names = interfaceutil.Attribute(
1435 """A ``namespaces`` instance.""")
1437 """A ``namespaces`` instance.""")
1436
1438
1437 def close():
1439 def close():
1438 """Close the handle on this repository."""
1440 """Close the handle on this repository."""
1439
1441
1440 def peer():
1442 def peer():
1441 """Obtain an object conforming to the ``peer`` interface."""
1443 """Obtain an object conforming to the ``peer`` interface."""
1442
1444
1443 def unfiltered():
1445 def unfiltered():
1444 """Obtain an unfiltered/raw view of this repo."""
1446 """Obtain an unfiltered/raw view of this repo."""
1445
1447
1446 def filtered(name, visibilityexceptions=None):
1448 def filtered(name, visibilityexceptions=None):
1447 """Obtain a named view of this repository."""
1449 """Obtain a named view of this repository."""
1448
1450
    obsstore = interfaceutil.Attribute(
        """A store of obsolescence data.""")

    changelog = interfaceutil.Attribute(
        """A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """)

    dirstate = interfaceutil.Attribute(
        """Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec.""")

    def narrowmatch():
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

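    # Illustrative usage sketch (the revset expressions are examples only,
    # assuming a ``repo`` instance conforming to this interface):
    #
    #   for rev in repo.revs(b'draft()'):       # integer revisions
    #       ...
    #   for ctx in repo.set(b'heads(all())'):   # changectx instances
    #       ...
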
    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def branchtip(branch, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

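    # Illustrative write pattern (a sketch, not normative documentation):
    # callers typically take the store lock, open a transaction, perform
    # their writes, then close the transaction. Mercurial transactions
    # support the context manager protocol, e.g.:
    #
    #   with repo.lock():
    #       with repo.transaction(b'my-operation') as tr:
    #           ...  # writes covered by ``tr``
    #
    # (``b'my-operation'`` is a hypothetical transaction description.)
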
    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

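    # Lock-ordering note (a Mercurial convention, stated here as a hint):
    # when both locks are needed, ``wlock()`` should be acquired before
    # ``lock()`` to avoid deadlocks, e.g.:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           ...
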
    def checkcommitpatterns(wctx, vdirs, match, status, fail):
        pass

    def commit(text='', user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(node1='.', node2=None, match=None, ignored=False,
               clean=False, unknown=False, listsubrepos=False):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute(
        """util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass

class completelocalrepository(ilocalrepositorymain,
                              ilocalrepositoryfilestorage):
    """Complete interface for a local repository."""

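# Because ``interfaceutil`` wraps ``zope.interface``, conformance of a
# concrete repository class can be checked with the usual zope verification
# helpers. A hedged sketch (assuming ``zope.interface`` is installed and
# using ``localrepo.localrepository`` as the obvious candidate):
#
#   from zope.interface import verify
#   from mercurial import localrepo
#   verify.verifyClass(completelocalrepository, localrepo.localrepository)
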
class iwireprotocolcommandcacher(interfaceutil.Interface):
    """Represents a caching backend for wire protocol commands.

    Wire protocol version 2 supports transparent caching of many commands.
    To leverage this caching, servers can activate objects that cache
    command responses. Objects handle both cache writing and reading.
    This interface defines how that response caching mechanism works.

    Wire protocol version 2 commands emit a series of objects that are
    serialized and sent to the client. The caching layer exists between
    the invocation of the command function and the sending of its output
    objects to an output layer.

    Instances of this interface represent a binding to a cache that
    can serve a response (in place of calling a command function) and/or
    write responses to a cache for subsequent use.

    When a command request arrives, the following happens with regard
    to this interface:

    1. The server determines whether the command request is cacheable.
    2. If it is, an instance of this interface is spawned.
    3. The cacher is activated in a context manager (``__enter__`` is called).
    4. A cache *key* for that request is derived. This will call the
       instance's ``adjustcachekeystate()`` method so the derivation
       can be influenced.
    5. The cacher is informed of the derived cache key via a call to
       ``setcachekey()``.
    6. The cacher's ``lookup()`` method is called to test for presence of
       the derived key in the cache.
    7. If ``lookup()`` returns a hit, that cached result is used in place
       of invoking the command function. ``__exit__`` is called and the
       instance is discarded.
    8. The command function is invoked.
    9. ``onobject()`` is called for each object emitted by the command
       function.
    10. After the final object is seen, ``onfinished()`` is called.
    11. ``__exit__`` is called to signal the end of use of the instance.

    Cache *key* derivation can be influenced by the instance.

    Cache keys are initially derived from a deterministic representation of
    the command request. This includes the command name, arguments, protocol
    version, etc. This initial key derivation is performed by CBOR-encoding a
    data structure and feeding that output into a hasher.

    Instances of this interface can influence this initial key derivation
    via ``adjustcachekeystate()``.

    The instance is informed of the derived cache key via a call to
    ``setcachekey()``. The instance must store the key locally so it can
    be consulted on subsequent operations that may require it.

    When constructed, the instance has access to a callable that can be used
    for encoding response objects. This callable receives as its single
    argument an object emitted by a command function. It returns an iterable
    of bytes chunks representing the encoded object. Unless the cacher is
    caching native Python objects in memory or has a way of reconstructing
    the original Python objects, implementations typically call this function
    to produce bytes from the output objects and then store those bytes in
    the cache. When it comes time to re-emit those bytes, they are wrapped
    in a ``wireprototypes.encodedresponse`` instance to tell the output
    layer that they are pre-encoded.

    When receiving the objects emitted by the command function, instances
    can choose what to do with those objects. The simplest thing to do is
    re-emit the original objects. They will be forwarded to the output
    layer and will be processed as if the cacher did not exist.

    Implementations could also choose not to emit objects, instead locally
    buffering objects or their encoded representation. They could then emit
    a single "coalesced" object when ``onfinished()`` is called. In
    this way, the implementation would function as a filtering layer of
    sorts.

    When caching objects, typically the encoded form of the object will
    be stored. Keep in mind that if the original object is forwarded to
    the output layer, it will need to be encoded there as well. For large
    output, this redundant encoding could add overhead. Implementations
    could wrap the encoded object data in ``wireprototypes.encodedresponse``
    instances to avoid this overhead.
    """
    def __enter__():
        """Marks the instance as active.

        Should return self.
        """

    def __exit__(exctype, excvalue, exctb):
        """Called when the cacher is no longer used.

        This can be used by implementations to perform cleanup actions (e.g.
        disconnecting network sockets, aborting a partially cached response).
        """

    def adjustcachekeystate(state):
        """Influences cache key derivation by adjusting state to derive key.

        A dict defining the state used to derive the cache key is passed.

        Implementations can modify this dict to record additional state that
        is wanted to influence key derivation.

        Implementations are *highly* encouraged to not modify or delete
        existing keys.
        """

    def setcachekey(key):
        """Record the derived cache key for this request.

        Instances may mutate the key for internal usage, as desired. e.g.
        instances may wish to prepend the repo name, introduce path
        components for filesystem or URL addressing, etc. Behavior is up to
        the cache.

        Returns a bool indicating if the request is cacheable by this
        instance.
        """

    def lookup():
        """Attempt to resolve an entry in the cache.

        The instance is instructed to look for the cache key that it was
        informed about via the call to ``setcachekey()``.

        If there's no cache hit or the cacher doesn't wish to use the cached
        entry, ``None`` should be returned.

        Else, a dict defining the cached result should be returned. The
        dict may have the following keys:

        objs
           An iterable of objects that should be sent to the client. That
           iterable of objects is expected to be what the command function
           would return if invoked or an equivalent representation thereof.
        """

    def onobject(obj):
        """Called when a new object is emitted from the command function.

        Receives as its argument the object that was emitted from the
        command function.

        This method returns an iterator of objects to forward to the output
        layer. The easiest implementation is a generator that just
        ``yield obj``.
        """

    def onfinished():
        """Called after all objects have been emitted from the command
        function.

        Implementations should return an iterator of objects to forward to
        the output layer.

        This method can be a generator.
        """
