# Provenance: Mercurial changeset r40458:968dd7e0 (default branch), by Boris Feld.
# Commit message: "changegroup: allow to force delta to be against p1..."
# Diff hunk shown below: @@ -1,1385 +1,1392 @@
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import weakref
12 import weakref
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21
21
22 from . import (
22 from . import (
23 error,
23 error,
24 match as matchmod,
24 match as matchmod,
25 mdiff,
25 mdiff,
26 phases,
26 phases,
27 pycompat,
27 pycompat,
28 repository,
28 repository,
29 util,
29 util,
30 )
30 )
31
31
# Per-version binary layouts of a changegroup delta header.
# cg1: node, p1, p2, linknode (four 20-byte hashes).
# cg2: adds an explicit 20-byte delta base node.
# cg3: big-endian, adds a 16-bit revlog flags field after the base node.
_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH")

# Repository requirement string for the large-file-storage extension.
LFS_REQUIREMENT = 'lfs'

# Local alias: read exactly N bytes or abort (see util.readexactly).
readexactly = util.readexactly
39
39
def getchunk(stream):
    """return the next chunk from stream as a string"""
    # Each chunk is a 4-byte big-endian length (which counts itself)
    # followed by the payload; a length of 0 is the end-of-group marker.
    lengthprefix = readexactly(stream, 4)
    length = struct.unpack(">l", lengthprefix)[0]
    if length > 4:
        return readexactly(stream, length - 4)
    if length:
        # Lengths 1-4 (or negative) cannot describe a valid chunk.
        raise error.Abort(_("invalid chunk length %d") % length)
    return ""
49
49
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # The on-wire length field counts itself, hence the extra 4 bytes.
    return struct.pack(">l", 4 + length)
53
53
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # A zero length terminates a chunk sequence on the wire.
    return struct.pack(">l", 0)
57
57
def _fileheader(path):
    """Obtain a changegroup chunk header for a named path."""
    # The path itself is the chunk payload, preceded by its framed length.
    header = chunkheader(len(path))
    return header + path
61
61
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    # ``cleanup`` holds the path to unlink on failure; it is "armed"
    # once the output file exists and disarmed after a full write.
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        # Everything was written: disarm cleanup so the file survives.
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            # A partial/failed write: remove the incomplete file via the
            # same layer (vfs or os) that created it.
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
95
95
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    # cg1 delta header: node, p1, p2, linknode (no explicit delta base).
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        # ``alg`` is a bundle compression type name; None means
        # uncompressed ('UN').
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            # NOTE(review): presumably the 'BZ' magic bytes were already
            # consumed from ``fh`` by the caller, hence the truncated
            # decompressor variant — confirm against the bundle readers.
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        # Optional no-arg progress callback, invoked per chunk read.
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        # Read the 4-byte big-endian length prefix. The advertised
        # length includes the prefix itself, so payload size is l - 4;
        # 0 marks the end of a group, 1..4 is malformed.
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        # cg1 does not transmit a delta base: the first delta of a chain
        # is against p1, subsequent deltas are against the previous node.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        # Returns {} at end of group, else the 7-tuple
        # (node, p1, p2, cs, deltabase, delta, flags).
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = self.deltaheader.unpack(headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contains in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parse the changegroup data, otherwise it will
        block in case of sshrepo because it don't know the end of the stream.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was proceeded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                # Re-emit the payload in 1 MiB slices to bound memory use.
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            # Collect the union of files touched, reported by the
            # per-changeset callback below.
            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changelog from changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    # Heads that close a branch don't count toward the
                    # user-visible head delta.
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    # Another changegroup in this transaction already set
                    # node/node_last; only override them in our local copy.
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all change in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefor `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
            if added:
                phases.registernew(repo, tr, targetphase, added)
            if phaseall is not None:
                phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]
438
438
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # The wire header names the delta base explicitly, so the cg1
        # chaining heuristic (``prevnode``) is not consulted here. No
        # revlog flags travel on the wire in cg2.
        node, p1, p2, deltabase, cs = headertuple
        return node, p1, p2, deltabase, cs, 0
454
454
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = deltaheader.size
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 carries the revlog flags directly in the wire header.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        # Consume the flat manifest group first, then any tree manifest
        # groups that follow it.
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        while True:
            chunkdata = self.filelogheader()
            if not chunkdata:
                # Empty header chunk: end of the tree manifest list.
                break
            # If we get here, there are directory manifests in the changegroup
            dirname = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % dirname)
            deltas = self.deltaiter()
            storage = repo.manifestlog.getstorage(dirname)
            if not storage.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
480
480
class headerlessfixup(object):
    """File-like adapter that replays already-consumed header bytes.

    ``h`` holds bytes that were read from ``fh`` before it was handed
    over (e.g. to sniff a format). read() first drains that buffer,
    then falls through to the underlying stream.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        buffered = self._h
        if not buffered:
            return readexactly(self._fh, n)
        piece, self._h = buffered[:n], buffered[n:]
        if len(piece) < n:
            # Buffer exhausted mid-request: top up from the real stream.
            piece += readexactly(self._fh, n - len(piece))
        return piece
492
492
def _revisiondeltatochunks(delta, headerfn):
    """Serialize a revisiondelta to changegroup chunks."""

    # The captured revision delta may be encoded as a delta against
    # a base revision or as a full revision. The changegroup format
    # requires that everything on the wire be deltas. So for full
    # revisions, we need to invent a header that says to rewrite
    # data.

    if delta.delta is not None:
        # Already a delta: ship as-is, no synthesized diff header.
        payload = delta.delta
        diffheader = b''
    else:
        payload = delta.revision
        if delta.basenode == nullid:
            # Full text with no base at all.
            diffheader = mdiff.trivialdiffheader(len(payload))
        else:
            # Full text standing in for a delta: tell the receiver to
            # replace the whole base revision.
            diffheader = mdiff.replacediffheader(delta.baserevisionsize,
                                                 len(payload))

    meta = headerfn(delta)

    yield chunkheader(len(meta) + len(diffheader) + len(payload))
    yield meta
    if diffheader:
        yield diffheader
    yield payload
519
519
520 def _sortnodesellipsis(store, nodes, cl, lookup):
520 def _sortnodesellipsis(store, nodes, cl, lookup):
521 """Sort nodes for changegroup generation."""
521 """Sort nodes for changegroup generation."""
522 # Ellipses serving mode.
522 # Ellipses serving mode.
523 #
523 #
524 # In a perfect world, we'd generate better ellipsis-ified graphs
524 # In a perfect world, we'd generate better ellipsis-ified graphs
525 # for non-changelog revlogs. In practice, we haven't started doing
525 # for non-changelog revlogs. In practice, we haven't started doing
526 # that yet, so the resulting DAGs for the manifestlog and filelogs
526 # that yet, so the resulting DAGs for the manifestlog and filelogs
527 # are actually full of bogus parentage on all the ellipsis
527 # are actually full of bogus parentage on all the ellipsis
528 # nodes. This has the side effect that, while the contents are
528 # nodes. This has the side effect that, while the contents are
529 # correct, the individual DAGs might be completely out of whack in
529 # correct, the individual DAGs might be completely out of whack in
530 # a case like 882681bc3166 and its ancestors (back about 10
530 # a case like 882681bc3166 and its ancestors (back about 10
531 # revisions or so) in the main hg repo.
531 # revisions or so) in the main hg repo.
532 #
532 #
533 # The one invariant we *know* holds is that the new (potentially
533 # The one invariant we *know* holds is that the new (potentially
534 # bogus) DAG shape will be valid if we order the nodes in the
534 # bogus) DAG shape will be valid if we order the nodes in the
535 # order that they're introduced in dramatis personae by the
535 # order that they're introduced in dramatis personae by the
536 # changelog, so what we do is we sort the non-changelog histories
536 # changelog, so what we do is we sort the non-changelog histories
537 # by the order in which they are used by the changelog.
537 # by the order in which they are used by the changelog.
538 key = lambda n: cl.rev(lookup(n))
538 key = lambda n: cl.rev(lookup(n))
539 return sorted(nodes, key=key)
539 return sorted(nodes, key=key)
540
540
def _resolvenarrowrevisioninfo(cl, store, ischangelog, rev, linkrev,
                               linknode, clrevtolocalrev, fullclnodes,
                               precomputedellipsis):
    """Resolve the parents and linknode of an ellipsis revision.

    ``rev``/``linkrev``/``linknode`` identify a revision of ``store``
    whose changelog revision is an ellipsis entry recorded in
    ``precomputedellipsis``.  Returns a ``(p1node, p2node, linknode)``
    tuple where the parents are the ellipsis changelog parents mapped
    back onto revisions local to ``store`` (``nullid`` where no parent
    applies).
    """
    linkparents = precomputedellipsis[linkrev]
    def local(clrev):
        """Turn a changelog revnum into a local revnum.

        The ellipsis dag is stored as revnums on the changelog,
        but when we're producing ellipsis entries for
        non-changelog revlogs, we need to turn those numbers into
        something local. This does that for us, and during the
        changelog sending phase will also expand the stored
        mappings as needed.
        """
        if clrev == nullrev:
            return nullrev

        if ischangelog:
            return clrev

        # Walk the ellipsis-ized changelog breadth-first looking for a
        # change that has been linked from the current revlog.
        #
        # For a flat manifest revlog only a single step should be necessary
        # as all relevant changelog entries are relevant to the flat
        # manifest.
        #
        # For a filelog or tree manifest dirlog however not every changelog
        # entry will have been relevant, so we need to skip some changelog
        # nodes even after ellipsis-izing.
        walk = [clrev]
        while walk:
            p = walk[0]
            walk = walk[1:]
            if p in clrevtolocalrev:
                return clrevtolocalrev[p]
            elif p in fullclnodes:
                walk.extend([pp for pp in cl.parentrevs(p)
                             if pp != nullrev])
            elif p in precomputedellipsis:
                walk.extend([pp for pp in precomputedellipsis[p]
                             if pp != nullrev])
            else:
                # In this case, we've got an ellipsis with parents
                # outside the current bundle (likely an
                # incremental pull). We "know" that we can use the
                # value of this same revlog at whatever revision
                # is pointed to by linknode. "Know" is in scare
                # quotes because I haven't done enough examination
                # of edge cases to convince myself this is really
                # a fact - it works for all the (admittedly
                # thorough) cases in our testsuite, but I would be
                # somewhat unsurprised to find a case in the wild
                # where this breaks down a bit. That said, I don't
                # know if it would hurt anything.
                for i in pycompat.xrange(rev, 0, -1):
                    if store.linkrev(i) == clrev:
                        return i
                # We failed to resolve a parent for this node, so
                # we crash the changegroup construction.
                raise error.Abort(
                    'unable to resolve parent while packing %r %r'
                    ' for changeset %r' % (store.indexfile, rev, clrev))

        return nullrev

    # No recorded link parents, or the local revision itself is a root:
    # emit it parentless.
    if not linkparents or (
        store.parentrevs(rev) == (nullrev, nullrev)):
        p1, p2 = nullrev, nullrev
    elif len(linkparents) == 1:
        p1, = sorted(local(p) for p in linkparents)
        p2 = nullrev
    else:
        p1, p2 = sorted(local(p) for p in linkparents)

    p1node, p2node = store.node(p1), store.node(p2)

    return p1node, p2node, linknode
619
619
def deltagroup(repo, store, nodes, ischangelog, lookup, forcedeltaparentprev,
               topic=None,
               ellipses=False, clrevtolocalrev=None, fullclnodes=None,
               precomputedellipsis=None):
    """Calculate deltas for a set of revisions.

    Is a generator of ``revisiondelta`` instances.

    If topic is not None, progress detail will be generated using this
    topic name (e.g. changesets, manifests, etc).
    """
    if not nodes:
        return

    cl = repo.changelog

    if ischangelog:
        # `hg log` shows changesets in storage order. To preserve order
        # across clones, send out changesets in storage order.
        nodesorder = 'storage'
    elif ellipses:
        nodes = _sortnodesellipsis(store, nodes, cl, lookup)
        nodesorder = 'nodes'
    else:
        nodesorder = None

    # Perform ellipses filtering and revision massaging. We do this before
    # emitrevisions() because a) filtering out revisions creates less work
    # for emitrevisions() b) dropping revisions would break emitrevisions()'s
    # assumptions about delta choices and we would possibly send a delta
    # referencing a missing base revision.
    #
    # Also, calling lookup() has side-effects with regards to populating
    # data structures. If we don't call lookup() for each node or if we call
    # lookup() after the first pass through each node, things can break -
    # possibly intermittently depending on the python hash seed! For that
    # reason, we store a mapping of all linknodes during the initial node
    # pass rather than use lookup() on the output side.
    if ellipses:
        filtered = []
        adjustedparents = {}
        linknodes = {}

        for node in nodes:
            rev = store.rev(node)
            linknode = lookup(node)
            linkrev = cl.rev(linknode)
            clrevtolocalrev[linkrev] = rev

            # If linknode is in fullclnodes, it means the corresponding
            # changeset was a full changeset and is being sent unaltered.
            if linknode in fullclnodes:
                linknodes[node] = linknode

            # If the corresponding changeset wasn't in the set computed
            # as relevant to us, it should be dropped outright.
            elif linkrev not in precomputedellipsis:
                continue

            else:
                # We could probably do this later and avoid the dict
                # holding state. But it likely doesn't matter.
                p1node, p2node, linknode = _resolvenarrowrevisioninfo(
                    cl, store, ischangelog, rev, linkrev, linknode,
                    clrevtolocalrev, fullclnodes, precomputedellipsis)

                adjustedparents[node] = (p1node, p2node)
                linknodes[node] = linknode

            filtered.append(node)

        nodes = filtered

    # We expect the first pass to be fast, so we only engage the progress
    # meter for constructing the revision deltas.
    progress = None
    if topic is not None:
        progress = repo.ui.makeprogress(topic, unit=_('chunks'),
                                        total=len(nodes))

    # Developer config that pins the delta base choice; only the empty
    # string (default) and 'p1' are recognized here.
    configtarget = repo.ui.config('devel', 'bundle.delta')
    if configtarget not in ('', 'p1'):
        msg = _("""config "devel.bundle.delta" has unknown value: %s""")
        repo.ui.warn(msg % configtarget)

    # An explicit request for prev-parent deltas (changegroup v1
    # compatibility) takes precedence over the developer config.
    deltamode = repository.CG_DELTAMODE_STD
    if forcedeltaparentprev:
        deltamode = repository.CG_DELTAMODE_PREV
    elif configtarget == 'p1':
        deltamode = repository.CG_DELTAMODE_P1

    revisions = store.emitrevisions(
        nodes,
        nodesorder=nodesorder,
        revisiondata=True,
        assumehaveparentrevisions=not ellipses,
        deltamode=deltamode)

    for i, revision in enumerate(revisions):
        if progress:
            progress.update(i + 1)

        if ellipses:
            linknode = linknodes[revision.node]

            if revision.node in adjustedparents:
                p1node, p2node = adjustedparents[revision.node]
                revision.p1node = p1node
                revision.p2node = p2node
                revision.flags |= repository.REVISION_FLAG_ELLIPSIS

        else:
            linknode = lookup(revision.node)

        revision.linknode = linknode
        yield revision

    if progress:
        progress.complete()
732
739
733 class cgpacker(object):
740 class cgpacker(object):
    def __init__(self, repo, oldmatcher, matcher, version,
                 builddeltaheader, manifestsend,
                 forcedeltaparentprev=False,
                 bundlecaps=None, ellipses=False,
                 shallow=False, ellipsisroots=None, fullnodes=None):
        """Given a source repo, construct a bundler.

        oldmatcher is a matcher that matches on files the client already has.
        These will not be included in the changegroup.

        matcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        forcedeltaparentprev indicates whether delta parents must be against
        the previous revision in a delta group. This should only be used for
        compatibility with changegroup version 1.

        builddeltaheader is a callable that constructs the header for a group
        delta.

        manifestsend is a chunk to send after manifests have been fully emitted.

        ellipses indicates whether ellipsis serving mode is enabled.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.

        shallow indicates whether shallow data might be sent. The packer may
        need to pack file contents not introduced by the changes being packed.

        fullnodes is the set of changelog nodes which should not be ellipsis
        nodes. We store this rather than the set of nodes that should be
        ellipsis because for very large histories we expect this to be
        significantly smaller.
        """
        # Both matchers are mandatory; callers must pass real matcher objects.
        assert oldmatcher
        assert matcher
        self._oldmatcher = oldmatcher
        self._matcher = matcher

        self.version = version
        self._forcedeltaparentprev = forcedeltaparentprev
        self._builddeltaheader = builddeltaheader
        self._manifestsend = manifestsend
        self._ellipses = ellipses

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._isshallow = shallow
        self._fullclnodes = fullnodes

        # Maps ellipsis revs to their roots at the changelog level.
        self._precomputedellipsis = ellipsisroots

        self._repo = repo

        # Emit per-part size notes only in verbose (non-debug) mode;
        # otherwise _verbosenote is a no-op.
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None
798
805
    def generate(self, commonrevs, clnodes, fastpathlinkrev, source,
                 changelog=True):
        """Yield a sequence of changegroup byte chunks.
        If changelog is False, changelog data won't be added to changegroup
        """

        repo = self._repo
        cl = repo.changelog

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0

        clstate, deltas = self._generatechangelog(cl, clnodes)
        # NOTE: the delta stream must be consumed even when changelog=False,
        # because _generatechangelog's state dict is only fully populated
        # once the stream has been exhausted.
        for delta in deltas:
            if changelog:
                for chunk in _revisiondeltatochunks(delta,
                                                    self._builddeltaheader):
                    size += len(chunk)
                    yield chunk

        close = closechunk()
        size += len(close)
        yield closechunk()

        self._verbosenote(_('%8.i (changelog)\n') % size)

        clrevorder = clstate['clrevorder']
        manifests = clstate['manifests']
        changedfiles = clstate['changedfiles']

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath when the manifest revlog uses generaldelta,
        # the manifest may be walked in the "wrong" order. Without 'clrevorder',
        # we would get an incorrect linkrev (see fix in cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta and is
        # never reordered. To handle this case, we simply take the slowpath,
        # which already has the 'clrevorder' logic. This was also fixed in
        # cc0ff93d0c0c.

        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        fnodes = {}  # needed file nodes

        size = 0
        it = self.generatemanifests(
            commonrevs, clrevorder, fastpathlinkrev, manifests, fnodes, source,
            clstate['clrevtomanifestrev'])

        for tree, deltas in it:
            if tree:
                # Sub-directory manifest headers only exist in cg3.
                assert self.version == b'03'
                chunk = _fileheader(tree)
                size += len(chunk)
                yield chunk

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsend

        mfdicts = None
        if self._ellipses and self._isshallow:
            mfdicts = [(self._repo.manifestlog[n].read(), lr)
                       for (n, lr) in manifests.iteritems()]

        manifests.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        it = self.generatefiles(changedfiles, commonrevs,
                                source, mfdicts, fastpathlinkrev,
                                fnodes, clrevs)

        for path, deltas in it:
            h = _fileheader(path)
            size = len(h)
            yield h

            for delta in deltas:
                chunks = _revisiondeltatochunks(delta, self._builddeltaheader)
                for chunk in chunks:
                    size += len(chunk)
                    yield chunk

            close = closechunk()
            size += len(close)
            yield close

            self._verbosenote(_('%8.i %s\n') % (size, path))

        yield closechunk()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)
910
917
    def _generatechangelog(self, cl, nodes):
        """Generate data for changelog chunks.

        Returns a 2-tuple of a dict containing state and an iterable of
        byte chunks. The state will not be fully populated until the
        chunk stream has been fully consumed.
        """
        clrevorder = {}
        manifests = {}
        mfl = self._repo.manifestlog
        changedfiles = set()
        clrevtomanifestrev = {}

        # Callback for the changelog, used to collect changed files and
        # manifest nodes.
        # Returns the linkrev node (identity in the changelog case).
        # NOTE: mutates clrevorder/manifests/changedfiles/clrevtomanifestrev
        # as a side effect, which is why the returned state dict is only
        # complete after the delta stream has been consumed.
        def lookupcl(x):
            c = cl.changelogrevision(x)
            clrevorder[x] = len(clrevorder)

            if self._ellipses:
                # Only update manifests if x is going to be sent. Otherwise we
                # end up with bogus linkrevs specified for manifests and
                # we skip some manifest nodes that we should otherwise
                # have sent.
                if (x in self._fullclnodes
                    or cl.rev(x) in self._precomputedellipsis):

                    manifestnode = c.manifest
                    # Record the first changeset introducing this manifest
                    # version.
                    manifests.setdefault(manifestnode, x)
                    # Set this narrow-specific dict so we have the lowest
                    # manifest revnum to look up for this cl revnum. (Part of
                    # mapping changelog ellipsis parents to manifest ellipsis
                    # parents)
                    clrevtomanifestrev.setdefault(
                        cl.rev(x), mfl.rev(manifestnode))
                # We can't trust the changed files list in the changeset if the
                # client requested a shallow clone.
                if self._isshallow:
                    changedfiles.update(mfl[c.manifest].read().keys())
                else:
                    changedfiles.update(c.files)
            else:
                # record the first changeset introducing this manifest version
                manifests.setdefault(c.manifest, x)
                # Record a complete list of potentially-changed files in
                # this manifest.
                changedfiles.update(c.files)

            return x

        state = {
            'clrevorder': clrevorder,
            'manifests': manifests,
            'changedfiles': changedfiles,
            'clrevtomanifestrev': clrevtomanifestrev,
        }

        gen = deltagroup(
            self._repo, cl, nodes, True, lookupcl,
            self._forcedeltaparentprev,
            ellipses=self._ellipses,
            topic=_('changesets'),
            clrevtolocalrev={},
            fullclnodes=self._fullclnodes,
            precomputedellipsis=self._precomputedellipsis)

        return state, gen
981
988
982 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
989 def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev,
983 manifests, fnodes, source, clrevtolocalrev):
990 manifests, fnodes, source, clrevtolocalrev):
984 """Returns an iterator of changegroup chunks containing manifests.
991 """Returns an iterator of changegroup chunks containing manifests.
985
992
986 `source` is unused here, but is used by extensions like remotefilelog to
993 `source` is unused here, but is used by extensions like remotefilelog to
987 change what is sent based in pulls vs pushes, etc.
994 change what is sent based in pulls vs pushes, etc.
988 """
995 """
989 repo = self._repo
996 repo = self._repo
990 mfl = repo.manifestlog
997 mfl = repo.manifestlog
991 tmfnodes = {'': manifests}
998 tmfnodes = {'': manifests}
992
999
993 # Callback for the manifest, used to collect linkrevs for filelog
1000 # Callback for the manifest, used to collect linkrevs for filelog
994 # revisions.
1001 # revisions.
995 # Returns the linkrev node (collected in lookupcl).
1002 # Returns the linkrev node (collected in lookupcl).
996 def makelookupmflinknode(tree, nodes):
1003 def makelookupmflinknode(tree, nodes):
997 if fastpathlinkrev:
1004 if fastpathlinkrev:
998 assert not tree
1005 assert not tree
999 return manifests.__getitem__
1006 return manifests.__getitem__
1000
1007
1001 def lookupmflinknode(x):
1008 def lookupmflinknode(x):
1002 """Callback for looking up the linknode for manifests.
1009 """Callback for looking up the linknode for manifests.
1003
1010
1004 Returns the linkrev node for the specified manifest.
1011 Returns the linkrev node for the specified manifest.
1005
1012
1006 SIDE EFFECT:
1013 SIDE EFFECT:
1007
1014
1008 1) fclnodes gets populated with the list of relevant
1015 1) fclnodes gets populated with the list of relevant
1009 file nodes if we're not using fastpathlinkrev
1016 file nodes if we're not using fastpathlinkrev
1010 2) When treemanifests are in use, collects treemanifest nodes
1017 2) When treemanifests are in use, collects treemanifest nodes
1011 to send
1018 to send
1012
1019
1013 Note that this means manifests must be completely sent to
1020 Note that this means manifests must be completely sent to
1014 the client before you can trust the list of files and
1021 the client before you can trust the list of files and
1015 treemanifests to send.
1022 treemanifests to send.
1016 """
1023 """
1017 clnode = nodes[x]
1024 clnode = nodes[x]
1018 mdata = mfl.get(tree, x).readfast(shallow=True)
1025 mdata = mfl.get(tree, x).readfast(shallow=True)
1019 for p, n, fl in mdata.iterentries():
1026 for p, n, fl in mdata.iterentries():
1020 if fl == 't': # subdirectory manifest
1027 if fl == 't': # subdirectory manifest
1021 subtree = tree + p + '/'
1028 subtree = tree + p + '/'
1022 tmfclnodes = tmfnodes.setdefault(subtree, {})
1029 tmfclnodes = tmfnodes.setdefault(subtree, {})
1023 tmfclnode = tmfclnodes.setdefault(n, clnode)
1030 tmfclnode = tmfclnodes.setdefault(n, clnode)
1024 if clrevorder[clnode] < clrevorder[tmfclnode]:
1031 if clrevorder[clnode] < clrevorder[tmfclnode]:
1025 tmfclnodes[n] = clnode
1032 tmfclnodes[n] = clnode
1026 else:
1033 else:
1027 f = tree + p
1034 f = tree + p
1028 fclnodes = fnodes.setdefault(f, {})
1035 fclnodes = fnodes.setdefault(f, {})
1029 fclnode = fclnodes.setdefault(n, clnode)
1036 fclnode = fclnodes.setdefault(n, clnode)
1030 if clrevorder[clnode] < clrevorder[fclnode]:
1037 if clrevorder[clnode] < clrevorder[fclnode]:
1031 fclnodes[n] = clnode
1038 fclnodes[n] = clnode
1032 return clnode
1039 return clnode
1033 return lookupmflinknode
1040 return lookupmflinknode
1034
1041
1035 while tmfnodes:
1042 while tmfnodes:
1036 tree, nodes = tmfnodes.popitem()
1043 tree, nodes = tmfnodes.popitem()
1037 store = mfl.getstorage(tree)
1044 store = mfl.getstorage(tree)
1038
1045
1039 if not self._matcher.visitdir(store.tree[:-1] or '.'):
1046 if not self._matcher.visitdir(store.tree[:-1] or '.'):
1040 # No nodes to send because this directory is out of
1047 # No nodes to send because this directory is out of
1041 # the client's view of the repository (probably
1048 # the client's view of the repository (probably
1042 # because of narrow clones).
1049 # because of narrow clones).
1043 prunednodes = []
1050 prunednodes = []
1044 else:
1051 else:
1045 # Avoid sending any manifest nodes we can prove the
1052 # Avoid sending any manifest nodes we can prove the
1046 # client already has by checking linkrevs. See the
1053 # client already has by checking linkrevs. See the
1047 # related comment in generatefiles().
1054 # related comment in generatefiles().
1048 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1055 prunednodes = self._prunemanifests(store, nodes, commonrevs)
1049 if tree and not prunednodes:
1056 if tree and not prunednodes:
1050 continue
1057 continue
1051
1058
1052 lookupfn = makelookupmflinknode(tree, nodes)
1059 lookupfn = makelookupmflinknode(tree, nodes)
1053
1060
1054 deltas = deltagroup(
1061 deltas = deltagroup(
1055 self._repo, store, prunednodes, False, lookupfn,
1062 self._repo, store, prunednodes, False, lookupfn,
1056 self._forcedeltaparentprev,
1063 self._forcedeltaparentprev,
1057 ellipses=self._ellipses,
1064 ellipses=self._ellipses,
1058 topic=_('manifests'),
1065 topic=_('manifests'),
1059 clrevtolocalrev=clrevtolocalrev,
1066 clrevtolocalrev=clrevtolocalrev,
1060 fullclnodes=self._fullclnodes,
1067 fullclnodes=self._fullclnodes,
1061 precomputedellipsis=self._precomputedellipsis)
1068 precomputedellipsis=self._precomputedellipsis)
1062
1069
1063 if not self._oldmatcher.visitdir(store.tree[:-1] or '.'):
1070 if not self._oldmatcher.visitdir(store.tree[:-1] or '.'):
1064 yield tree, deltas
1071 yield tree, deltas
1065 else:
1072 else:
1066 # 'deltas' is a generator and we need to consume it even if
1073 # 'deltas' is a generator and we need to consume it even if
1067 # we are not going to send it because a side-effect is that
1074 # we are not going to send it because a side-effect is that
1068 # it updates tmdnodes (via lookupfn)
1075 # it updates tmdnodes (via lookupfn)
1069 for d in deltas:
1076 for d in deltas:
1070 pass
1077 pass
1071 if not tree:
1078 if not tree:
1072 yield tree, []
1079 yield tree, []
1073
1080
1074 def _prunemanifests(self, store, nodes, commonrevs):
1081 def _prunemanifests(self, store, nodes, commonrevs):
1075 # This is split out as a separate method to allow filtering
1082 # This is split out as a separate method to allow filtering
1076 # commonrevs in extension code.
1083 # commonrevs in extension code.
1077 #
1084 #
1078 # TODO(augie): this shouldn't be required, instead we should
1085 # TODO(augie): this shouldn't be required, instead we should
1079 # make filtering of revisions to send delegated to the store
1086 # make filtering of revisions to send delegated to the store
1080 # layer.
1087 # layer.
1081 frev, flr = store.rev, store.linkrev
1088 frev, flr = store.rev, store.linkrev
1082 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1089 return [n for n in nodes if flr(frev(n)) not in commonrevs]
1083
1090
1084 # The 'source' parameter is useful for extensions
1091 # The 'source' parameter is useful for extensions
1085 def generatefiles(self, changedfiles, commonrevs, source,
1092 def generatefiles(self, changedfiles, commonrevs, source,
1086 mfdicts, fastpathlinkrev, fnodes, clrevs):
1093 mfdicts, fastpathlinkrev, fnodes, clrevs):
1087 changedfiles = [f for f in changedfiles
1094 changedfiles = [f for f in changedfiles
1088 if self._matcher(f) and not self._oldmatcher(f)]
1095 if self._matcher(f) and not self._oldmatcher(f)]
1089
1096
1090 if not fastpathlinkrev:
1097 if not fastpathlinkrev:
1091 def normallinknodes(unused, fname):
1098 def normallinknodes(unused, fname):
1092 return fnodes.get(fname, {})
1099 return fnodes.get(fname, {})
1093 else:
1100 else:
1094 cln = self._repo.changelog.node
1101 cln = self._repo.changelog.node
1095
1102
1096 def normallinknodes(store, fname):
1103 def normallinknodes(store, fname):
1097 flinkrev = store.linkrev
1104 flinkrev = store.linkrev
1098 fnode = store.node
1105 fnode = store.node
1099 revs = ((r, flinkrev(r)) for r in store)
1106 revs = ((r, flinkrev(r)) for r in store)
1100 return dict((fnode(r), cln(lr))
1107 return dict((fnode(r), cln(lr))
1101 for r, lr in revs if lr in clrevs)
1108 for r, lr in revs if lr in clrevs)
1102
1109
1103 clrevtolocalrev = {}
1110 clrevtolocalrev = {}
1104
1111
1105 if self._isshallow:
1112 if self._isshallow:
1106 # In a shallow clone, the linknodes callback needs to also include
1113 # In a shallow clone, the linknodes callback needs to also include
1107 # those file nodes that are in the manifests we sent but weren't
1114 # those file nodes that are in the manifests we sent but weren't
1108 # introduced by those manifests.
1115 # introduced by those manifests.
1109 commonctxs = [self._repo[c] for c in commonrevs]
1116 commonctxs = [self._repo[c] for c in commonrevs]
1110 clrev = self._repo.changelog.rev
1117 clrev = self._repo.changelog.rev
1111
1118
1112 def linknodes(flog, fname):
1119 def linknodes(flog, fname):
1113 for c in commonctxs:
1120 for c in commonctxs:
1114 try:
1121 try:
1115 fnode = c.filenode(fname)
1122 fnode = c.filenode(fname)
1116 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1123 clrevtolocalrev[c.rev()] = flog.rev(fnode)
1117 except error.ManifestLookupError:
1124 except error.ManifestLookupError:
1118 pass
1125 pass
1119 links = normallinknodes(flog, fname)
1126 links = normallinknodes(flog, fname)
1120 if len(links) != len(mfdicts):
1127 if len(links) != len(mfdicts):
1121 for mf, lr in mfdicts:
1128 for mf, lr in mfdicts:
1122 fnode = mf.get(fname, None)
1129 fnode = mf.get(fname, None)
1123 if fnode in links:
1130 if fnode in links:
1124 links[fnode] = min(links[fnode], lr, key=clrev)
1131 links[fnode] = min(links[fnode], lr, key=clrev)
1125 elif fnode:
1132 elif fnode:
1126 links[fnode] = lr
1133 links[fnode] = lr
1127 return links
1134 return links
1128 else:
1135 else:
1129 linknodes = normallinknodes
1136 linknodes = normallinknodes
1130
1137
1131 repo = self._repo
1138 repo = self._repo
1132 progress = repo.ui.makeprogress(_('files'), unit=_('files'),
1139 progress = repo.ui.makeprogress(_('files'), unit=_('files'),
1133 total=len(changedfiles))
1140 total=len(changedfiles))
1134 for i, fname in enumerate(sorted(changedfiles)):
1141 for i, fname in enumerate(sorted(changedfiles)):
1135 filerevlog = repo.file(fname)
1142 filerevlog = repo.file(fname)
1136 if not filerevlog:
1143 if not filerevlog:
1137 raise error.Abort(_("empty or missing file data for %s") %
1144 raise error.Abort(_("empty or missing file data for %s") %
1138 fname)
1145 fname)
1139
1146
1140 clrevtolocalrev.clear()
1147 clrevtolocalrev.clear()
1141
1148
1142 linkrevnodes = linknodes(filerevlog, fname)
1149 linkrevnodes = linknodes(filerevlog, fname)
1143 # Lookup for filenodes, we collected the linkrev nodes above in the
1150 # Lookup for filenodes, we collected the linkrev nodes above in the
1144 # fastpath case and with lookupmf in the slowpath case.
1151 # fastpath case and with lookupmf in the slowpath case.
1145 def lookupfilelog(x):
1152 def lookupfilelog(x):
1146 return linkrevnodes[x]
1153 return linkrevnodes[x]
1147
1154
1148 frev, flr = filerevlog.rev, filerevlog.linkrev
1155 frev, flr = filerevlog.rev, filerevlog.linkrev
1149 # Skip sending any filenode we know the client already
1156 # Skip sending any filenode we know the client already
1150 # has. This avoids over-sending files relatively
1157 # has. This avoids over-sending files relatively
1151 # inexpensively, so it's not a problem if we under-filter
1158 # inexpensively, so it's not a problem if we under-filter
1152 # here.
1159 # here.
1153 filenodes = [n for n in linkrevnodes
1160 filenodes = [n for n in linkrevnodes
1154 if flr(frev(n)) not in commonrevs]
1161 if flr(frev(n)) not in commonrevs]
1155
1162
1156 if not filenodes:
1163 if not filenodes:
1157 continue
1164 continue
1158
1165
1159 progress.update(i + 1, item=fname)
1166 progress.update(i + 1, item=fname)
1160
1167
1161 deltas = deltagroup(
1168 deltas = deltagroup(
1162 self._repo, filerevlog, filenodes, False, lookupfilelog,
1169 self._repo, filerevlog, filenodes, False, lookupfilelog,
1163 self._forcedeltaparentprev,
1170 self._forcedeltaparentprev,
1164 ellipses=self._ellipses,
1171 ellipses=self._ellipses,
1165 clrevtolocalrev=clrevtolocalrev,
1172 clrevtolocalrev=clrevtolocalrev,
1166 fullclnodes=self._fullclnodes,
1173 fullclnodes=self._fullclnodes,
1167 precomputedellipsis=self._precomputedellipsis)
1174 precomputedellipsis=self._precomputedellipsis)
1168
1175
1169 yield fname, deltas
1176 yield fname, deltas
1170
1177
1171 progress.complete()
1178 progress.complete()
1172
1179
1173 def _makecg1packer(repo, oldmatcher, matcher, bundlecaps,
1180 def _makecg1packer(repo, oldmatcher, matcher, bundlecaps,
1174 ellipses=False, shallow=False, ellipsisroots=None,
1181 ellipses=False, shallow=False, ellipsisroots=None,
1175 fullnodes=None):
1182 fullnodes=None):
1176 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1183 builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack(
1177 d.node, d.p1node, d.p2node, d.linknode)
1184 d.node, d.p1node, d.p2node, d.linknode)
1178
1185
1179 return cgpacker(repo, oldmatcher, matcher, b'01',
1186 return cgpacker(repo, oldmatcher, matcher, b'01',
1180 builddeltaheader=builddeltaheader,
1187 builddeltaheader=builddeltaheader,
1181 manifestsend=b'',
1188 manifestsend=b'',
1182 forcedeltaparentprev=True,
1189 forcedeltaparentprev=True,
1183 bundlecaps=bundlecaps,
1190 bundlecaps=bundlecaps,
1184 ellipses=ellipses,
1191 ellipses=ellipses,
1185 shallow=shallow,
1192 shallow=shallow,
1186 ellipsisroots=ellipsisroots,
1193 ellipsisroots=ellipsisroots,
1187 fullnodes=fullnodes)
1194 fullnodes=fullnodes)
1188
1195
1189 def _makecg2packer(repo, oldmatcher, matcher, bundlecaps,
1196 def _makecg2packer(repo, oldmatcher, matcher, bundlecaps,
1190 ellipses=False, shallow=False, ellipsisroots=None,
1197 ellipses=False, shallow=False, ellipsisroots=None,
1191 fullnodes=None):
1198 fullnodes=None):
1192 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1199 builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack(
1193 d.node, d.p1node, d.p2node, d.basenode, d.linknode)
1200 d.node, d.p1node, d.p2node, d.basenode, d.linknode)
1194
1201
1195 return cgpacker(repo, oldmatcher, matcher, b'02',
1202 return cgpacker(repo, oldmatcher, matcher, b'02',
1196 builddeltaheader=builddeltaheader,
1203 builddeltaheader=builddeltaheader,
1197 manifestsend=b'',
1204 manifestsend=b'',
1198 bundlecaps=bundlecaps,
1205 bundlecaps=bundlecaps,
1199 ellipses=ellipses,
1206 ellipses=ellipses,
1200 shallow=shallow,
1207 shallow=shallow,
1201 ellipsisroots=ellipsisroots,
1208 ellipsisroots=ellipsisroots,
1202 fullnodes=fullnodes)
1209 fullnodes=fullnodes)
1203
1210
1204 def _makecg3packer(repo, oldmatcher, matcher, bundlecaps,
1211 def _makecg3packer(repo, oldmatcher, matcher, bundlecaps,
1205 ellipses=False, shallow=False, ellipsisroots=None,
1212 ellipses=False, shallow=False, ellipsisroots=None,
1206 fullnodes=None):
1213 fullnodes=None):
1207 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1214 builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
1208 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)
1215 d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags)
1209
1216
1210 return cgpacker(repo, oldmatcher, matcher, b'03',
1217 return cgpacker(repo, oldmatcher, matcher, b'03',
1211 builddeltaheader=builddeltaheader,
1218 builddeltaheader=builddeltaheader,
1212 manifestsend=closechunk(),
1219 manifestsend=closechunk(),
1213 bundlecaps=bundlecaps,
1220 bundlecaps=bundlecaps,
1214 ellipses=ellipses,
1221 ellipses=ellipses,
1215 shallow=shallow,
1222 shallow=shallow,
1216 ellipsisroots=ellipsisroots,
1223 ellipsisroots=ellipsisroots,
1217 fullnodes=fullnodes)
1224 fullnodes=fullnodes)
1218
1225
1219 _packermap = {'01': (_makecg1packer, cg1unpacker),
1226 _packermap = {'01': (_makecg1packer, cg1unpacker),
1220 # cg2 adds support for exchanging generaldelta
1227 # cg2 adds support for exchanging generaldelta
1221 '02': (_makecg2packer, cg2unpacker),
1228 '02': (_makecg2packer, cg2unpacker),
1222 # cg3 adds support for exchanging revlog flags and treemanifests
1229 # cg3 adds support for exchanging revlog flags and treemanifests
1223 '03': (_makecg3packer, cg3unpacker),
1230 '03': (_makecg3packer, cg3unpacker),
1224 }
1231 }
1225
1232
1226 def allsupportedversions(repo):
1233 def allsupportedversions(repo):
1227 versions = set(_packermap.keys())
1234 versions = set(_packermap.keys())
1228 if not (repo.ui.configbool('experimental', 'changegroup3') or
1235 if not (repo.ui.configbool('experimental', 'changegroup3') or
1229 repo.ui.configbool('experimental', 'treemanifest') or
1236 repo.ui.configbool('experimental', 'treemanifest') or
1230 'treemanifest' in repo.requirements):
1237 'treemanifest' in repo.requirements):
1231 versions.discard('03')
1238 versions.discard('03')
1232 return versions
1239 return versions
1233
1240
1234 # Changegroup versions that can be applied to the repo
1241 # Changegroup versions that can be applied to the repo
1235 def supportedincomingversions(repo):
1242 def supportedincomingversions(repo):
1236 return allsupportedversions(repo)
1243 return allsupportedversions(repo)
1237
1244
1238 # Changegroup versions that can be created from the repo
1245 # Changegroup versions that can be created from the repo
1239 def supportedoutgoingversions(repo):
1246 def supportedoutgoingversions(repo):
1240 versions = allsupportedversions(repo)
1247 versions = allsupportedversions(repo)
1241 if 'treemanifest' in repo.requirements:
1248 if 'treemanifest' in repo.requirements:
1242 # Versions 01 and 02 support only flat manifests and it's just too
1249 # Versions 01 and 02 support only flat manifests and it's just too
1243 # expensive to convert between the flat manifest and tree manifest on
1250 # expensive to convert between the flat manifest and tree manifest on
1244 # the fly. Since tree manifests are hashed differently, all of history
1251 # the fly. Since tree manifests are hashed differently, all of history
1245 # would have to be converted. Instead, we simply don't even pretend to
1252 # would have to be converted. Instead, we simply don't even pretend to
1246 # support versions 01 and 02.
1253 # support versions 01 and 02.
1247 versions.discard('01')
1254 versions.discard('01')
1248 versions.discard('02')
1255 versions.discard('02')
1249 if repository.NARROW_REQUIREMENT in repo.requirements:
1256 if repository.NARROW_REQUIREMENT in repo.requirements:
1250 # Versions 01 and 02 don't support revlog flags, and we need to
1257 # Versions 01 and 02 don't support revlog flags, and we need to
1251 # support that for stripping and unbundling to work.
1258 # support that for stripping and unbundling to work.
1252 versions.discard('01')
1259 versions.discard('01')
1253 versions.discard('02')
1260 versions.discard('02')
1254 if LFS_REQUIREMENT in repo.requirements:
1261 if LFS_REQUIREMENT in repo.requirements:
1255 # Versions 01 and 02 don't support revlog flags, and we need to
1262 # Versions 01 and 02 don't support revlog flags, and we need to
1256 # mark LFS entries with REVIDX_EXTSTORED.
1263 # mark LFS entries with REVIDX_EXTSTORED.
1257 versions.discard('01')
1264 versions.discard('01')
1258 versions.discard('02')
1265 versions.discard('02')
1259
1266
1260 return versions
1267 return versions
1261
1268
1262 def localversion(repo):
1269 def localversion(repo):
1263 # Finds the best version to use for bundles that are meant to be used
1270 # Finds the best version to use for bundles that are meant to be used
1264 # locally, such as those from strip and shelve, and temporary bundles.
1271 # locally, such as those from strip and shelve, and temporary bundles.
1265 return max(supportedoutgoingversions(repo))
1272 return max(supportedoutgoingversions(repo))
1266
1273
1267 def safeversion(repo):
1274 def safeversion(repo):
1268 # Finds the smallest version that it's safe to assume clients of the repo
1275 # Finds the smallest version that it's safe to assume clients of the repo
1269 # will support. For example, all hg versions that support generaldelta also
1276 # will support. For example, all hg versions that support generaldelta also
1270 # support changegroup 02.
1277 # support changegroup 02.
1271 versions = supportedoutgoingversions(repo)
1278 versions = supportedoutgoingversions(repo)
1272 if 'generaldelta' in repo.requirements:
1279 if 'generaldelta' in repo.requirements:
1273 versions.discard('01')
1280 versions.discard('01')
1274 assert versions
1281 assert versions
1275 return min(versions)
1282 return min(versions)
1276
1283
1277 def getbundler(version, repo, bundlecaps=None, oldmatcher=None,
1284 def getbundler(version, repo, bundlecaps=None, oldmatcher=None,
1278 matcher=None, ellipses=False, shallow=False,
1285 matcher=None, ellipses=False, shallow=False,
1279 ellipsisroots=None, fullnodes=None):
1286 ellipsisroots=None, fullnodes=None):
1280 assert version in supportedoutgoingversions(repo)
1287 assert version in supportedoutgoingversions(repo)
1281
1288
1282 if matcher is None:
1289 if matcher is None:
1283 matcher = matchmod.alwaysmatcher(repo.root, '')
1290 matcher = matchmod.alwaysmatcher(repo.root, '')
1284 if oldmatcher is None:
1291 if oldmatcher is None:
1285 oldmatcher = matchmod.nevermatcher(repo.root, '')
1292 oldmatcher = matchmod.nevermatcher(repo.root, '')
1286
1293
1287 if version == '01' and not matcher.always():
1294 if version == '01' and not matcher.always():
1288 raise error.ProgrammingError('version 01 changegroups do not support '
1295 raise error.ProgrammingError('version 01 changegroups do not support '
1289 'sparse file matchers')
1296 'sparse file matchers')
1290
1297
1291 if ellipses and version in (b'01', b'02'):
1298 if ellipses and version in (b'01', b'02'):
1292 raise error.Abort(
1299 raise error.Abort(
1293 _('ellipsis nodes require at least cg3 on client and server, '
1300 _('ellipsis nodes require at least cg3 on client and server, '
1294 'but negotiated version %s') % version)
1301 'but negotiated version %s') % version)
1295
1302
1296 # Requested files could include files not in the local store. So
1303 # Requested files could include files not in the local store. So
1297 # filter those out.
1304 # filter those out.
1298 matcher = repo.narrowmatch(matcher)
1305 matcher = repo.narrowmatch(matcher)
1299
1306
1300 fn = _packermap[version][0]
1307 fn = _packermap[version][0]
1301 return fn(repo, oldmatcher, matcher, bundlecaps, ellipses=ellipses,
1308 return fn(repo, oldmatcher, matcher, bundlecaps, ellipses=ellipses,
1302 shallow=shallow, ellipsisroots=ellipsisroots,
1309 shallow=shallow, ellipsisroots=ellipsisroots,
1303 fullnodes=fullnodes)
1310 fullnodes=fullnodes)
1304
1311
1305 def getunbundler(version, fh, alg, extras=None):
1312 def getunbundler(version, fh, alg, extras=None):
1306 return _packermap[version][1](fh, alg, extras=extras)
1313 return _packermap[version][1](fh, alg, extras=extras)
1307
1314
1308 def _changegroupinfo(repo, nodes, source):
1315 def _changegroupinfo(repo, nodes, source):
1309 if repo.ui.verbose or source == 'bundle':
1316 if repo.ui.verbose or source == 'bundle':
1310 repo.ui.status(_("%d changesets found\n") % len(nodes))
1317 repo.ui.status(_("%d changesets found\n") % len(nodes))
1311 if repo.ui.debugflag:
1318 if repo.ui.debugflag:
1312 repo.ui.debug("list of changesets:\n")
1319 repo.ui.debug("list of changesets:\n")
1313 for node in nodes:
1320 for node in nodes:
1314 repo.ui.debug("%s\n" % hex(node))
1321 repo.ui.debug("%s\n" % hex(node))
1315
1322
1316 def makechangegroup(repo, outgoing, version, source, fastpath=False,
1323 def makechangegroup(repo, outgoing, version, source, fastpath=False,
1317 bundlecaps=None):
1324 bundlecaps=None):
1318 cgstream = makestream(repo, outgoing, version, source,
1325 cgstream = makestream(repo, outgoing, version, source,
1319 fastpath=fastpath, bundlecaps=bundlecaps)
1326 fastpath=fastpath, bundlecaps=bundlecaps)
1320 return getunbundler(version, util.chunkbuffer(cgstream), None,
1327 return getunbundler(version, util.chunkbuffer(cgstream), None,
1321 {'clcount': len(outgoing.missing) })
1328 {'clcount': len(outgoing.missing) })
1322
1329
1323 def makestream(repo, outgoing, version, source, fastpath=False,
1330 def makestream(repo, outgoing, version, source, fastpath=False,
1324 bundlecaps=None, matcher=None):
1331 bundlecaps=None, matcher=None):
1325 bundler = getbundler(version, repo, bundlecaps=bundlecaps,
1332 bundler = getbundler(version, repo, bundlecaps=bundlecaps,
1326 matcher=matcher)
1333 matcher=matcher)
1327
1334
1328 repo = repo.unfiltered()
1335 repo = repo.unfiltered()
1329 commonrevs = outgoing.common
1336 commonrevs = outgoing.common
1330 csets = outgoing.missing
1337 csets = outgoing.missing
1331 heads = outgoing.missingheads
1338 heads = outgoing.missingheads
1332 # We go through the fast path if we get told to, or if all (unfiltered
1339 # We go through the fast path if we get told to, or if all (unfiltered
1333 # heads have been requested (since we then know there all linkrevs will
1340 # heads have been requested (since we then know there all linkrevs will
1334 # be pulled by the client).
1341 # be pulled by the client).
1335 heads.sort()
1342 heads.sort()
1336 fastpathlinkrev = fastpath or (
1343 fastpathlinkrev = fastpath or (
1337 repo.filtername is None and heads == sorted(repo.heads()))
1344 repo.filtername is None and heads == sorted(repo.heads()))
1338
1345
1339 repo.hook('preoutgoing', throw=True, source=source)
1346 repo.hook('preoutgoing', throw=True, source=source)
1340 _changegroupinfo(repo, csets, source)
1347 _changegroupinfo(repo, csets, source)
1341 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1348 return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1342
1349
1343 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1350 def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
1344 revisions = 0
1351 revisions = 0
1345 files = 0
1352 files = 0
1346 progress = repo.ui.makeprogress(_('files'), unit=_('files'),
1353 progress = repo.ui.makeprogress(_('files'), unit=_('files'),
1347 total=expectedfiles)
1354 total=expectedfiles)
1348 for chunkdata in iter(source.filelogheader, {}):
1355 for chunkdata in iter(source.filelogheader, {}):
1349 files += 1
1356 files += 1
1350 f = chunkdata["filename"]
1357 f = chunkdata["filename"]
1351 repo.ui.debug("adding %s revisions\n" % f)
1358 repo.ui.debug("adding %s revisions\n" % f)
1352 progress.increment()
1359 progress.increment()
1353 fl = repo.file(f)
1360 fl = repo.file(f)
1354 o = len(fl)
1361 o = len(fl)
1355 try:
1362 try:
1356 deltas = source.deltaiter()
1363 deltas = source.deltaiter()
1357 if not fl.addgroup(deltas, revmap, trp):
1364 if not fl.addgroup(deltas, revmap, trp):
1358 raise error.Abort(_("received file revlog group is empty"))
1365 raise error.Abort(_("received file revlog group is empty"))
1359 except error.CensoredBaseError as e:
1366 except error.CensoredBaseError as e:
1360 raise error.Abort(_("received delta base is censored: %s") % e)
1367 raise error.Abort(_("received delta base is censored: %s") % e)
1361 revisions += len(fl) - o
1368 revisions += len(fl) - o
1362 if f in needfiles:
1369 if f in needfiles:
1363 needs = needfiles[f]
1370 needs = needfiles[f]
1364 for new in pycompat.xrange(o, len(fl)):
1371 for new in pycompat.xrange(o, len(fl)):
1365 n = fl.node(new)
1372 n = fl.node(new)
1366 if n in needs:
1373 if n in needs:
1367 needs.remove(n)
1374 needs.remove(n)
1368 else:
1375 else:
1369 raise error.Abort(
1376 raise error.Abort(
1370 _("received spurious file revlog entry"))
1377 _("received spurious file revlog entry"))
1371 if not needs:
1378 if not needs:
1372 del needfiles[f]
1379 del needfiles[f]
1373 progress.complete()
1380 progress.complete()
1374
1381
1375 for f, needs in needfiles.iteritems():
1382 for f, needs in needfiles.iteritems():
1376 fl = repo.file(f)
1383 fl = repo.file(f)
1377 for n in needs:
1384 for n in needs:
1378 try:
1385 try:
1379 fl.rev(n)
1386 fl.rev(n)
1380 except error.LookupError:
1387 except error.LookupError:
1381 raise error.Abort(
1388 raise error.Abort(
1382 _('missing file data for %s:%s - run hg verify') %
1389 _('missing file data for %s:%s - run hg verify') %
1383 (f, hex(n)))
1390 (f, hex(n)))
1384
1391
1385 return revisions, files
1392 return revisions, files
@@ -1,1435 +1,1438
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18 def loadconfigtable(ui, extname, configtable):
18 def loadconfigtable(ui, extname, configtable):
19 """update config item known to the ui with the extension ones"""
19 """update config item known to the ui with the extension ones"""
20 for section, items in sorted(configtable.items()):
20 for section, items in sorted(configtable.items()):
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 knownkeys = set(knownitems)
22 knownkeys = set(knownitems)
23 newkeys = set(items)
23 newkeys = set(items)
24 for key in sorted(knownkeys & newkeys):
24 for key in sorted(knownkeys & newkeys):
25 msg = "extension '%s' overwrite config item '%s.%s'"
25 msg = "extension '%s' overwrite config item '%s.%s'"
26 msg %= (extname, section, key)
26 msg %= (extname, section, key)
27 ui.develwarn(msg, config='warn-config')
27 ui.develwarn(msg, config='warn-config')
28
28
29 knownitems.update(items)
29 knownitems.update(items)
30
30
31 class configitem(object):
31 class configitem(object):
32 """represent a known config item
32 """represent a known config item
33
33
34 :section: the official config section where to find this item,
34 :section: the official config section where to find this item,
35 :name: the official name within the section,
35 :name: the official name within the section,
36 :default: default value for this item,
36 :default: default value for this item,
37 :alias: optional list of tuples as alternatives,
37 :alias: optional list of tuples as alternatives,
38 :generic: this is a generic definition, match name using regular expression.
38 :generic: this is a generic definition, match name using regular expression.
39 """
39 """
40
40
41 def __init__(self, section, name, default=None, alias=(),
41 def __init__(self, section, name, default=None, alias=(),
42 generic=False, priority=0):
42 generic=False, priority=0):
43 self.section = section
43 self.section = section
44 self.name = name
44 self.name = name
45 self.default = default
45 self.default = default
46 self.alias = list(alias)
46 self.alias = list(alias)
47 self.generic = generic
47 self.generic = generic
48 self.priority = priority
48 self.priority = priority
49 self._re = None
49 self._re = None
50 if generic:
50 if generic:
51 self._re = re.compile(self.name)
51 self._re = re.compile(self.name)
52
52
53 class itemregister(dict):
53 class itemregister(dict):
54 """A specialized dictionary that can handle wild-card selection"""
54 """A specialized dictionary that can handle wild-card selection"""
55
55
56 def __init__(self):
56 def __init__(self):
57 super(itemregister, self).__init__()
57 super(itemregister, self).__init__()
58 self._generics = set()
58 self._generics = set()
59
59
60 def update(self, other):
60 def update(self, other):
61 super(itemregister, self).update(other)
61 super(itemregister, self).update(other)
62 self._generics.update(other._generics)
62 self._generics.update(other._generics)
63
63
64 def __setitem__(self, key, item):
64 def __setitem__(self, key, item):
65 super(itemregister, self).__setitem__(key, item)
65 super(itemregister, self).__setitem__(key, item)
66 if item.generic:
66 if item.generic:
67 self._generics.add(item)
67 self._generics.add(item)
68
68
69 def get(self, key):
69 def get(self, key):
70 baseitem = super(itemregister, self).get(key)
70 baseitem = super(itemregister, self).get(key)
71 if baseitem is not None and not baseitem.generic:
71 if baseitem is not None and not baseitem.generic:
72 return baseitem
72 return baseitem
73
73
74 # search for a matching generic item
74 # search for a matching generic item
75 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
75 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
76 for item in generics:
76 for item in generics:
77 # we use 'match' instead of 'search' to make the matching simpler
77 # we use 'match' instead of 'search' to make the matching simpler
78 # for people unfamiliar with regular expression. Having the match
78 # for people unfamiliar with regular expression. Having the match
79 # rooted to the start of the string will produce less surprising
79 # rooted to the start of the string will produce less surprising
80 # result for user writing simple regex for sub-attribute.
80 # result for user writing simple regex for sub-attribute.
81 #
81 #
82 # For example using "color\..*" match produces an unsurprising
82 # For example using "color\..*" match produces an unsurprising
83 # result, while using search could suddenly match apparently
83 # result, while using search could suddenly match apparently
84 # unrelated configuration that happens to contains "color."
84 # unrelated configuration that happens to contains "color."
85 # anywhere. This is a tradeoff where we favor requiring ".*" on
85 # anywhere. This is a tradeoff where we favor requiring ".*" on
86 # some match to avoid the need to prefix most pattern with "^".
86 # some match to avoid the need to prefix most pattern with "^".
87 # The "^" seems more error prone.
87 # The "^" seems more error prone.
88 if item._re.match(key):
88 if item._re.match(key):
89 return item
89 return item
90
90
91 return None
91 return None
92
92
93 coreitems = {}
93 coreitems = {}
94
94
95 def _register(configtable, *args, **kwargs):
95 def _register(configtable, *args, **kwargs):
96 item = configitem(*args, **kwargs)
96 item = configitem(*args, **kwargs)
97 section = configtable.setdefault(item.section, itemregister())
97 section = configtable.setdefault(item.section, itemregister())
98 if item.name in section:
98 if item.name in section:
99 msg = "duplicated config item registration for '%s.%s'"
99 msg = "duplicated config item registration for '%s.%s'"
100 raise error.ProgrammingError(msg % (item.section, item.name))
100 raise error.ProgrammingError(msg % (item.section, item.name))
101 section[item.name] = item
101 section[item.name] = item
102
102
103 # special value for case where the default is derived from other values
103 # special value for case where the default is derived from other values
104 dynamicdefault = object()
104 dynamicdefault = object()
105
105
106 # Registering actual config items
106 # Registering actual config items
107
107
108 def getitemregister(configtable):
108 def getitemregister(configtable):
109 f = functools.partial(_register, configtable)
109 f = functools.partial(_register, configtable)
110 # export pseudo enum as configitem.*
110 # export pseudo enum as configitem.*
111 f.dynamicdefault = dynamicdefault
111 f.dynamicdefault = dynamicdefault
112 return f
112 return f
113
113
114 coreconfigitem = getitemregister(coreitems)
114 coreconfigitem = getitemregister(coreitems)
115
115
116 coreconfigitem('alias', '.*',
116 coreconfigitem('alias', '.*',
117 default=dynamicdefault,
117 default=dynamicdefault,
118 generic=True,
118 generic=True,
119 )
119 )
120 coreconfigitem('annotate', 'nodates',
120 coreconfigitem('annotate', 'nodates',
121 default=False,
121 default=False,
122 )
122 )
123 coreconfigitem('annotate', 'showfunc',
123 coreconfigitem('annotate', 'showfunc',
124 default=False,
124 default=False,
125 )
125 )
126 coreconfigitem('annotate', 'unified',
126 coreconfigitem('annotate', 'unified',
127 default=None,
127 default=None,
128 )
128 )
129 coreconfigitem('annotate', 'git',
129 coreconfigitem('annotate', 'git',
130 default=False,
130 default=False,
131 )
131 )
132 coreconfigitem('annotate', 'ignorews',
132 coreconfigitem('annotate', 'ignorews',
133 default=False,
133 default=False,
134 )
134 )
135 coreconfigitem('annotate', 'ignorewsamount',
135 coreconfigitem('annotate', 'ignorewsamount',
136 default=False,
136 default=False,
137 )
137 )
138 coreconfigitem('annotate', 'ignoreblanklines',
138 coreconfigitem('annotate', 'ignoreblanklines',
139 default=False,
139 default=False,
140 )
140 )
141 coreconfigitem('annotate', 'ignorewseol',
141 coreconfigitem('annotate', 'ignorewseol',
142 default=False,
142 default=False,
143 )
143 )
144 coreconfigitem('annotate', 'nobinary',
144 coreconfigitem('annotate', 'nobinary',
145 default=False,
145 default=False,
146 )
146 )
147 coreconfigitem('annotate', 'noprefix',
147 coreconfigitem('annotate', 'noprefix',
148 default=False,
148 default=False,
149 )
149 )
150 coreconfigitem('annotate', 'word-diff',
150 coreconfigitem('annotate', 'word-diff',
151 default=False,
151 default=False,
152 )
152 )
153 coreconfigitem('auth', 'cookiefile',
153 coreconfigitem('auth', 'cookiefile',
154 default=None,
154 default=None,
155 )
155 )
156 # bookmarks.pushing: internal hack for discovery
156 # bookmarks.pushing: internal hack for discovery
157 coreconfigitem('bookmarks', 'pushing',
157 coreconfigitem('bookmarks', 'pushing',
158 default=list,
158 default=list,
159 )
159 )
160 # bundle.mainreporoot: internal hack for bundlerepo
160 # bundle.mainreporoot: internal hack for bundlerepo
161 coreconfigitem('bundle', 'mainreporoot',
161 coreconfigitem('bundle', 'mainreporoot',
162 default='',
162 default='',
163 )
163 )
164 coreconfigitem('censor', 'policy',
164 coreconfigitem('censor', 'policy',
165 default='abort',
165 default='abort',
166 )
166 )
167 coreconfigitem('chgserver', 'idletimeout',
167 coreconfigitem('chgserver', 'idletimeout',
168 default=3600,
168 default=3600,
169 )
169 )
170 coreconfigitem('chgserver', 'skiphash',
170 coreconfigitem('chgserver', 'skiphash',
171 default=False,
171 default=False,
172 )
172 )
173 coreconfigitem('cmdserver', 'log',
173 coreconfigitem('cmdserver', 'log',
174 default=None,
174 default=None,
175 )
175 )
176 coreconfigitem('color', '.*',
176 coreconfigitem('color', '.*',
177 default=None,
177 default=None,
178 generic=True,
178 generic=True,
179 )
179 )
180 coreconfigitem('color', 'mode',
180 coreconfigitem('color', 'mode',
181 default='auto',
181 default='auto',
182 )
182 )
183 coreconfigitem('color', 'pagermode',
183 coreconfigitem('color', 'pagermode',
184 default=dynamicdefault,
184 default=dynamicdefault,
185 )
185 )
186 coreconfigitem('commands', 'grep.all-files',
186 coreconfigitem('commands', 'grep.all-files',
187 default=False,
187 default=False,
188 )
188 )
189 coreconfigitem('commands', 'resolve.confirm',
189 coreconfigitem('commands', 'resolve.confirm',
190 default=False,
190 default=False,
191 )
191 )
192 coreconfigitem('commands', 'resolve.explicit-re-merge',
192 coreconfigitem('commands', 'resolve.explicit-re-merge',
193 default=False,
193 default=False,
194 )
194 )
195 coreconfigitem('commands', 'resolve.mark-check',
195 coreconfigitem('commands', 'resolve.mark-check',
196 default='none',
196 default='none',
197 )
197 )
198 coreconfigitem('commands', 'show.aliasprefix',
198 coreconfigitem('commands', 'show.aliasprefix',
199 default=list,
199 default=list,
200 )
200 )
201 coreconfigitem('commands', 'status.relative',
201 coreconfigitem('commands', 'status.relative',
202 default=False,
202 default=False,
203 )
203 )
204 coreconfigitem('commands', 'status.skipstates',
204 coreconfigitem('commands', 'status.skipstates',
205 default=[],
205 default=[],
206 )
206 )
207 coreconfigitem('commands', 'status.terse',
207 coreconfigitem('commands', 'status.terse',
208 default='',
208 default='',
209 )
209 )
210 coreconfigitem('commands', 'status.verbose',
210 coreconfigitem('commands', 'status.verbose',
211 default=False,
211 default=False,
212 )
212 )
213 coreconfigitem('commands', 'update.check',
213 coreconfigitem('commands', 'update.check',
214 default=None,
214 default=None,
215 )
215 )
216 coreconfigitem('commands', 'update.requiredest',
216 coreconfigitem('commands', 'update.requiredest',
217 default=False,
217 default=False,
218 )
218 )
219 coreconfigitem('committemplate', '.*',
219 coreconfigitem('committemplate', '.*',
220 default=None,
220 default=None,
221 generic=True,
221 generic=True,
222 )
222 )
223 coreconfigitem('convert', 'bzr.saverev',
223 coreconfigitem('convert', 'bzr.saverev',
224 default=True,
224 default=True,
225 )
225 )
226 coreconfigitem('convert', 'cvsps.cache',
226 coreconfigitem('convert', 'cvsps.cache',
227 default=True,
227 default=True,
228 )
228 )
229 coreconfigitem('convert', 'cvsps.fuzz',
229 coreconfigitem('convert', 'cvsps.fuzz',
230 default=60,
230 default=60,
231 )
231 )
232 coreconfigitem('convert', 'cvsps.logencoding',
232 coreconfigitem('convert', 'cvsps.logencoding',
233 default=None,
233 default=None,
234 )
234 )
235 coreconfigitem('convert', 'cvsps.mergefrom',
235 coreconfigitem('convert', 'cvsps.mergefrom',
236 default=None,
236 default=None,
237 )
237 )
238 coreconfigitem('convert', 'cvsps.mergeto',
238 coreconfigitem('convert', 'cvsps.mergeto',
239 default=None,
239 default=None,
240 )
240 )
241 coreconfigitem('convert', 'git.committeractions',
241 coreconfigitem('convert', 'git.committeractions',
242 default=lambda: ['messagedifferent'],
242 default=lambda: ['messagedifferent'],
243 )
243 )
244 coreconfigitem('convert', 'git.extrakeys',
244 coreconfigitem('convert', 'git.extrakeys',
245 default=list,
245 default=list,
246 )
246 )
247 coreconfigitem('convert', 'git.findcopiesharder',
247 coreconfigitem('convert', 'git.findcopiesharder',
248 default=False,
248 default=False,
249 )
249 )
250 coreconfigitem('convert', 'git.remoteprefix',
250 coreconfigitem('convert', 'git.remoteprefix',
251 default='remote',
251 default='remote',
252 )
252 )
253 coreconfigitem('convert', 'git.renamelimit',
253 coreconfigitem('convert', 'git.renamelimit',
254 default=400,
254 default=400,
255 )
255 )
256 coreconfigitem('convert', 'git.saverev',
256 coreconfigitem('convert', 'git.saverev',
257 default=True,
257 default=True,
258 )
258 )
259 coreconfigitem('convert', 'git.similarity',
259 coreconfigitem('convert', 'git.similarity',
260 default=50,
260 default=50,
261 )
261 )
262 coreconfigitem('convert', 'git.skipsubmodules',
262 coreconfigitem('convert', 'git.skipsubmodules',
263 default=False,
263 default=False,
264 )
264 )
265 coreconfigitem('convert', 'hg.clonebranches',
265 coreconfigitem('convert', 'hg.clonebranches',
266 default=False,
266 default=False,
267 )
267 )
268 coreconfigitem('convert', 'hg.ignoreerrors',
268 coreconfigitem('convert', 'hg.ignoreerrors',
269 default=False,
269 default=False,
270 )
270 )
271 coreconfigitem('convert', 'hg.revs',
271 coreconfigitem('convert', 'hg.revs',
272 default=None,
272 default=None,
273 )
273 )
274 coreconfigitem('convert', 'hg.saverev',
274 coreconfigitem('convert', 'hg.saverev',
275 default=False,
275 default=False,
276 )
276 )
277 coreconfigitem('convert', 'hg.sourcename',
277 coreconfigitem('convert', 'hg.sourcename',
278 default=None,
278 default=None,
279 )
279 )
280 coreconfigitem('convert', 'hg.startrev',
280 coreconfigitem('convert', 'hg.startrev',
281 default=None,
281 default=None,
282 )
282 )
283 coreconfigitem('convert', 'hg.tagsbranch',
283 coreconfigitem('convert', 'hg.tagsbranch',
284 default='default',
284 default='default',
285 )
285 )
286 coreconfigitem('convert', 'hg.usebranchnames',
286 coreconfigitem('convert', 'hg.usebranchnames',
287 default=True,
287 default=True,
288 )
288 )
289 coreconfigitem('convert', 'ignoreancestorcheck',
289 coreconfigitem('convert', 'ignoreancestorcheck',
290 default=False,
290 default=False,
291 )
291 )
292 coreconfigitem('convert', 'localtimezone',
292 coreconfigitem('convert', 'localtimezone',
293 default=False,
293 default=False,
294 )
294 )
295 coreconfigitem('convert', 'p4.encoding',
295 coreconfigitem('convert', 'p4.encoding',
296 default=dynamicdefault,
296 default=dynamicdefault,
297 )
297 )
298 coreconfigitem('convert', 'p4.startrev',
298 coreconfigitem('convert', 'p4.startrev',
299 default=0,
299 default=0,
300 )
300 )
301 coreconfigitem('convert', 'skiptags',
301 coreconfigitem('convert', 'skiptags',
302 default=False,
302 default=False,
303 )
303 )
304 coreconfigitem('convert', 'svn.debugsvnlog',
304 coreconfigitem('convert', 'svn.debugsvnlog',
305 default=True,
305 default=True,
306 )
306 )
307 coreconfigitem('convert', 'svn.trunk',
307 coreconfigitem('convert', 'svn.trunk',
308 default=None,
308 default=None,
309 )
309 )
310 coreconfigitem('convert', 'svn.tags',
310 coreconfigitem('convert', 'svn.tags',
311 default=None,
311 default=None,
312 )
312 )
313 coreconfigitem('convert', 'svn.branches',
313 coreconfigitem('convert', 'svn.branches',
314 default=None,
314 default=None,
315 )
315 )
316 coreconfigitem('convert', 'svn.startrev',
316 coreconfigitem('convert', 'svn.startrev',
317 default=0,
317 default=0,
318 )
318 )
319 coreconfigitem('debug', 'dirstate.delaywrite',
319 coreconfigitem('debug', 'dirstate.delaywrite',
320 default=0,
320 default=0,
321 )
321 )
322 coreconfigitem('defaults', '.*',
322 coreconfigitem('defaults', '.*',
323 default=None,
323 default=None,
324 generic=True,
324 generic=True,
325 )
325 )
326 coreconfigitem('devel', 'all-warnings',
326 coreconfigitem('devel', 'all-warnings',
327 default=False,
327 default=False,
328 )
328 )
329 coreconfigitem('devel', 'bundle2.debug',
329 coreconfigitem('devel', 'bundle2.debug',
330 default=False,
330 default=False,
331 )
331 )
332 coreconfigitem('devel', 'bundle.delta',
333 default='',
334 )
332 coreconfigitem('devel', 'cache-vfs',
335 coreconfigitem('devel', 'cache-vfs',
333 default=None,
336 default=None,
334 )
337 )
335 coreconfigitem('devel', 'check-locks',
338 coreconfigitem('devel', 'check-locks',
336 default=False,
339 default=False,
337 )
340 )
338 coreconfigitem('devel', 'check-relroot',
341 coreconfigitem('devel', 'check-relroot',
339 default=False,
342 default=False,
340 )
343 )
341 coreconfigitem('devel', 'default-date',
344 coreconfigitem('devel', 'default-date',
342 default=None,
345 default=None,
343 )
346 )
344 coreconfigitem('devel', 'deprec-warn',
347 coreconfigitem('devel', 'deprec-warn',
345 default=False,
348 default=False,
346 )
349 )
347 coreconfigitem('devel', 'disableloaddefaultcerts',
350 coreconfigitem('devel', 'disableloaddefaultcerts',
348 default=False,
351 default=False,
349 )
352 )
350 coreconfigitem('devel', 'warn-empty-changegroup',
353 coreconfigitem('devel', 'warn-empty-changegroup',
351 default=False,
354 default=False,
352 )
355 )
353 coreconfigitem('devel', 'legacy.exchange',
356 coreconfigitem('devel', 'legacy.exchange',
354 default=list,
357 default=list,
355 )
358 )
356 coreconfigitem('devel', 'servercafile',
359 coreconfigitem('devel', 'servercafile',
357 default='',
360 default='',
358 )
361 )
359 coreconfigitem('devel', 'serverexactprotocol',
362 coreconfigitem('devel', 'serverexactprotocol',
360 default='',
363 default='',
361 )
364 )
362 coreconfigitem('devel', 'serverrequirecert',
365 coreconfigitem('devel', 'serverrequirecert',
363 default=False,
366 default=False,
364 )
367 )
365 coreconfigitem('devel', 'strip-obsmarkers',
368 coreconfigitem('devel', 'strip-obsmarkers',
366 default=True,
369 default=True,
367 )
370 )
368 coreconfigitem('devel', 'warn-config',
371 coreconfigitem('devel', 'warn-config',
369 default=None,
372 default=None,
370 )
373 )
371 coreconfigitem('devel', 'warn-config-default',
374 coreconfigitem('devel', 'warn-config-default',
372 default=None,
375 default=None,
373 )
376 )
374 coreconfigitem('devel', 'user.obsmarker',
377 coreconfigitem('devel', 'user.obsmarker',
375 default=None,
378 default=None,
376 )
379 )
377 coreconfigitem('devel', 'warn-config-unknown',
380 coreconfigitem('devel', 'warn-config-unknown',
378 default=None,
381 default=None,
379 )
382 )
380 coreconfigitem('devel', 'debug.copies',
383 coreconfigitem('devel', 'debug.copies',
381 default=False,
384 default=False,
382 )
385 )
383 coreconfigitem('devel', 'debug.extensions',
386 coreconfigitem('devel', 'debug.extensions',
384 default=False,
387 default=False,
385 )
388 )
386 coreconfigitem('devel', 'debug.peer-request',
389 coreconfigitem('devel', 'debug.peer-request',
387 default=False,
390 default=False,
388 )
391 )
389 coreconfigitem('diff', 'nodates',
392 coreconfigitem('diff', 'nodates',
390 default=False,
393 default=False,
391 )
394 )
392 coreconfigitem('diff', 'showfunc',
395 coreconfigitem('diff', 'showfunc',
393 default=False,
396 default=False,
394 )
397 )
395 coreconfigitem('diff', 'unified',
398 coreconfigitem('diff', 'unified',
396 default=None,
399 default=None,
397 )
400 )
398 coreconfigitem('diff', 'git',
401 coreconfigitem('diff', 'git',
399 default=False,
402 default=False,
400 )
403 )
401 coreconfigitem('diff', 'ignorews',
404 coreconfigitem('diff', 'ignorews',
402 default=False,
405 default=False,
403 )
406 )
404 coreconfigitem('diff', 'ignorewsamount',
407 coreconfigitem('diff', 'ignorewsamount',
405 default=False,
408 default=False,
406 )
409 )
407 coreconfigitem('diff', 'ignoreblanklines',
410 coreconfigitem('diff', 'ignoreblanklines',
408 default=False,
411 default=False,
409 )
412 )
410 coreconfigitem('diff', 'ignorewseol',
413 coreconfigitem('diff', 'ignorewseol',
411 default=False,
414 default=False,
412 )
415 )
413 coreconfigitem('diff', 'nobinary',
416 coreconfigitem('diff', 'nobinary',
414 default=False,
417 default=False,
415 )
418 )
416 coreconfigitem('diff', 'noprefix',
419 coreconfigitem('diff', 'noprefix',
417 default=False,
420 default=False,
418 )
421 )
419 coreconfigitem('diff', 'word-diff',
422 coreconfigitem('diff', 'word-diff',
420 default=False,
423 default=False,
421 )
424 )
422 coreconfigitem('email', 'bcc',
425 coreconfigitem('email', 'bcc',
423 default=None,
426 default=None,
424 )
427 )
425 coreconfigitem('email', 'cc',
428 coreconfigitem('email', 'cc',
426 default=None,
429 default=None,
427 )
430 )
428 coreconfigitem('email', 'charsets',
431 coreconfigitem('email', 'charsets',
429 default=list,
432 default=list,
430 )
433 )
431 coreconfigitem('email', 'from',
434 coreconfigitem('email', 'from',
432 default=None,
435 default=None,
433 )
436 )
434 coreconfigitem('email', 'method',
437 coreconfigitem('email', 'method',
435 default='smtp',
438 default='smtp',
436 )
439 )
437 coreconfigitem('email', 'reply-to',
440 coreconfigitem('email', 'reply-to',
438 default=None,
441 default=None,
439 )
442 )
440 coreconfigitem('email', 'to',
443 coreconfigitem('email', 'to',
441 default=None,
444 default=None,
442 )
445 )
443 coreconfigitem('experimental', 'archivemetatemplate',
446 coreconfigitem('experimental', 'archivemetatemplate',
444 default=dynamicdefault,
447 default=dynamicdefault,
445 )
448 )
446 coreconfigitem('experimental', 'bundle-phases',
449 coreconfigitem('experimental', 'bundle-phases',
447 default=False,
450 default=False,
448 )
451 )
449 coreconfigitem('experimental', 'bundle2-advertise',
452 coreconfigitem('experimental', 'bundle2-advertise',
450 default=True,
453 default=True,
451 )
454 )
452 coreconfigitem('experimental', 'bundle2-output-capture',
455 coreconfigitem('experimental', 'bundle2-output-capture',
453 default=False,
456 default=False,
454 )
457 )
455 coreconfigitem('experimental', 'bundle2.pushback',
458 coreconfigitem('experimental', 'bundle2.pushback',
456 default=False,
459 default=False,
457 )
460 )
458 coreconfigitem('experimental', 'bundle2lazylocking',
461 coreconfigitem('experimental', 'bundle2lazylocking',
459 default=False,
462 default=False,
460 )
463 )
461 coreconfigitem('experimental', 'bundlecomplevel',
464 coreconfigitem('experimental', 'bundlecomplevel',
462 default=None,
465 default=None,
463 )
466 )
464 coreconfigitem('experimental', 'bundlecomplevel.bzip2',
467 coreconfigitem('experimental', 'bundlecomplevel.bzip2',
465 default=None,
468 default=None,
466 )
469 )
467 coreconfigitem('experimental', 'bundlecomplevel.gzip',
470 coreconfigitem('experimental', 'bundlecomplevel.gzip',
468 default=None,
471 default=None,
469 )
472 )
470 coreconfigitem('experimental', 'bundlecomplevel.none',
473 coreconfigitem('experimental', 'bundlecomplevel.none',
471 default=None,
474 default=None,
472 )
475 )
473 coreconfigitem('experimental', 'bundlecomplevel.zstd',
476 coreconfigitem('experimental', 'bundlecomplevel.zstd',
474 default=None,
477 default=None,
475 )
478 )
476 coreconfigitem('experimental', 'changegroup3',
479 coreconfigitem('experimental', 'changegroup3',
477 default=False,
480 default=False,
478 )
481 )
479 coreconfigitem('experimental', 'clientcompressionengines',
482 coreconfigitem('experimental', 'clientcompressionengines',
480 default=list,
483 default=list,
481 )
484 )
482 coreconfigitem('experimental', 'copytrace',
485 coreconfigitem('experimental', 'copytrace',
483 default='on',
486 default='on',
484 )
487 )
485 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
488 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
486 default=100,
489 default=100,
487 )
490 )
488 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
491 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
489 default=100,
492 default=100,
490 )
493 )
491 coreconfigitem('experimental', 'crecordtest',
494 coreconfigitem('experimental', 'crecordtest',
492 default=None,
495 default=None,
493 )
496 )
494 coreconfigitem('experimental', 'directaccess',
497 coreconfigitem('experimental', 'directaccess',
495 default=False,
498 default=False,
496 )
499 )
497 coreconfigitem('experimental', 'directaccess.revnums',
500 coreconfigitem('experimental', 'directaccess.revnums',
498 default=False,
501 default=False,
499 )
502 )
500 coreconfigitem('experimental', 'editortmpinhg',
503 coreconfigitem('experimental', 'editortmpinhg',
501 default=False,
504 default=False,
502 )
505 )
503 coreconfigitem('experimental', 'evolution',
506 coreconfigitem('experimental', 'evolution',
504 default=list,
507 default=list,
505 )
508 )
506 coreconfigitem('experimental', 'evolution.allowdivergence',
509 coreconfigitem('experimental', 'evolution.allowdivergence',
507 default=False,
510 default=False,
508 alias=[('experimental', 'allowdivergence')]
511 alias=[('experimental', 'allowdivergence')]
509 )
512 )
510 coreconfigitem('experimental', 'evolution.allowunstable',
513 coreconfigitem('experimental', 'evolution.allowunstable',
511 default=None,
514 default=None,
512 )
515 )
513 coreconfigitem('experimental', 'evolution.createmarkers',
516 coreconfigitem('experimental', 'evolution.createmarkers',
514 default=None,
517 default=None,
515 )
518 )
516 coreconfigitem('experimental', 'evolution.effect-flags',
519 coreconfigitem('experimental', 'evolution.effect-flags',
517 default=True,
520 default=True,
518 alias=[('experimental', 'effect-flags')]
521 alias=[('experimental', 'effect-flags')]
519 )
522 )
520 coreconfigitem('experimental', 'evolution.exchange',
523 coreconfigitem('experimental', 'evolution.exchange',
521 default=None,
524 default=None,
522 )
525 )
523 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
526 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
524 default=False,
527 default=False,
525 )
528 )
526 coreconfigitem('experimental', 'evolution.report-instabilities',
529 coreconfigitem('experimental', 'evolution.report-instabilities',
527 default=True,
530 default=True,
528 )
531 )
529 coreconfigitem('experimental', 'evolution.track-operation',
532 coreconfigitem('experimental', 'evolution.track-operation',
530 default=True,
533 default=True,
531 )
534 )
532 coreconfigitem('experimental', 'maxdeltachainspan',
535 coreconfigitem('experimental', 'maxdeltachainspan',
533 default=-1,
536 default=-1,
534 )
537 )
535 coreconfigitem('experimental', 'mergetempdirprefix',
538 coreconfigitem('experimental', 'mergetempdirprefix',
536 default=None,
539 default=None,
537 )
540 )
538 coreconfigitem('experimental', 'mmapindexthreshold',
541 coreconfigitem('experimental', 'mmapindexthreshold',
539 default=None,
542 default=None,
540 )
543 )
541 coreconfigitem('experimental', 'narrow',
544 coreconfigitem('experimental', 'narrow',
542 default=False,
545 default=False,
543 )
546 )
544 coreconfigitem('experimental', 'nonnormalparanoidcheck',
547 coreconfigitem('experimental', 'nonnormalparanoidcheck',
545 default=False,
548 default=False,
546 )
549 )
547 coreconfigitem('experimental', 'exportableenviron',
550 coreconfigitem('experimental', 'exportableenviron',
548 default=list,
551 default=list,
549 )
552 )
550 coreconfigitem('experimental', 'extendedheader.index',
553 coreconfigitem('experimental', 'extendedheader.index',
551 default=None,
554 default=None,
552 )
555 )
553 coreconfigitem('experimental', 'extendedheader.similarity',
556 coreconfigitem('experimental', 'extendedheader.similarity',
554 default=False,
557 default=False,
555 )
558 )
556 coreconfigitem('experimental', 'format.compression',
559 coreconfigitem('experimental', 'format.compression',
557 default='zlib',
560 default='zlib',
558 )
561 )
559 coreconfigitem('experimental', 'graphshorten',
562 coreconfigitem('experimental', 'graphshorten',
560 default=False,
563 default=False,
561 )
564 )
562 coreconfigitem('experimental', 'graphstyle.parent',
565 coreconfigitem('experimental', 'graphstyle.parent',
563 default=dynamicdefault,
566 default=dynamicdefault,
564 )
567 )
565 coreconfigitem('experimental', 'graphstyle.missing',
568 coreconfigitem('experimental', 'graphstyle.missing',
566 default=dynamicdefault,
569 default=dynamicdefault,
567 )
570 )
568 coreconfigitem('experimental', 'graphstyle.grandparent',
571 coreconfigitem('experimental', 'graphstyle.grandparent',
569 default=dynamicdefault,
572 default=dynamicdefault,
570 )
573 )
571 coreconfigitem('experimental', 'hook-track-tags',
574 coreconfigitem('experimental', 'hook-track-tags',
572 default=False,
575 default=False,
573 )
576 )
574 coreconfigitem('experimental', 'httppeer.advertise-v2',
577 coreconfigitem('experimental', 'httppeer.advertise-v2',
575 default=False,
578 default=False,
576 )
579 )
577 coreconfigitem('experimental', 'httppeer.v2-encoder-order',
580 coreconfigitem('experimental', 'httppeer.v2-encoder-order',
578 default=None,
581 default=None,
579 )
582 )
580 coreconfigitem('experimental', 'httppostargs',
583 coreconfigitem('experimental', 'httppostargs',
581 default=False,
584 default=False,
582 )
585 )
583 coreconfigitem('experimental', 'mergedriver',
586 coreconfigitem('experimental', 'mergedriver',
584 default=None,
587 default=None,
585 )
588 )
586 coreconfigitem('experimental', 'nointerrupt', default=False)
589 coreconfigitem('experimental', 'nointerrupt', default=False)
587 coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)
590 coreconfigitem('experimental', 'nointerrupt-interactiveonly', default=True)
588
591
589 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
592 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
590 default=False,
593 default=False,
591 )
594 )
592 coreconfigitem('experimental', 'remotenames',
595 coreconfigitem('experimental', 'remotenames',
593 default=False,
596 default=False,
594 )
597 )
595 coreconfigitem('experimental', 'removeemptydirs',
598 coreconfigitem('experimental', 'removeemptydirs',
596 default=True,
599 default=True,
597 )
600 )
598 coreconfigitem('experimental', 'revisions.prefixhexnode',
601 coreconfigitem('experimental', 'revisions.prefixhexnode',
599 default=False,
602 default=False,
600 )
603 )
601 coreconfigitem('experimental', 'revlogv2',
604 coreconfigitem('experimental', 'revlogv2',
602 default=None,
605 default=None,
603 )
606 )
604 coreconfigitem('experimental', 'revisions.disambiguatewithin',
607 coreconfigitem('experimental', 'revisions.disambiguatewithin',
605 default=None,
608 default=None,
606 )
609 )
607 coreconfigitem('experimental', 'server.filesdata.recommended-batch-size',
610 coreconfigitem('experimental', 'server.filesdata.recommended-batch-size',
608 default=50000,
611 default=50000,
609 )
612 )
610 coreconfigitem('experimental', 'server.manifestdata.recommended-batch-size',
613 coreconfigitem('experimental', 'server.manifestdata.recommended-batch-size',
611 default=100000,
614 default=100000,
612 )
615 )
613 coreconfigitem('experimental.server', 'stream-narrow-clones',
616 coreconfigitem('experimental.server', 'stream-narrow-clones',
614 default=False,
617 default=False,
615 )
618 )
616 coreconfigitem('experimental', 'single-head-per-branch',
619 coreconfigitem('experimental', 'single-head-per-branch',
617 default=False,
620 default=False,
618 )
621 )
619 coreconfigitem('experimental', 'sshserver.support-v2',
622 coreconfigitem('experimental', 'sshserver.support-v2',
620 default=False,
623 default=False,
621 )
624 )
622 coreconfigitem('experimental', 'sparse-read',
625 coreconfigitem('experimental', 'sparse-read',
623 default=False,
626 default=False,
624 )
627 )
625 coreconfigitem('experimental', 'sparse-read.density-threshold',
628 coreconfigitem('experimental', 'sparse-read.density-threshold',
626 default=0.50,
629 default=0.50,
627 )
630 )
628 coreconfigitem('experimental', 'sparse-read.min-gap-size',
631 coreconfigitem('experimental', 'sparse-read.min-gap-size',
629 default='65K',
632 default='65K',
630 )
633 )
631 coreconfigitem('experimental', 'treemanifest',
634 coreconfigitem('experimental', 'treemanifest',
632 default=False,
635 default=False,
633 )
636 )
634 coreconfigitem('experimental', 'update.atomic-file',
637 coreconfigitem('experimental', 'update.atomic-file',
635 default=False,
638 default=False,
636 )
639 )
637 coreconfigitem('experimental', 'sshpeer.advertise-v2',
640 coreconfigitem('experimental', 'sshpeer.advertise-v2',
638 default=False,
641 default=False,
639 )
642 )
640 coreconfigitem('experimental', 'web.apiserver',
643 coreconfigitem('experimental', 'web.apiserver',
641 default=False,
644 default=False,
642 )
645 )
643 coreconfigitem('experimental', 'web.api.http-v2',
646 coreconfigitem('experimental', 'web.api.http-v2',
644 default=False,
647 default=False,
645 )
648 )
646 coreconfigitem('experimental', 'web.api.debugreflect',
649 coreconfigitem('experimental', 'web.api.debugreflect',
647 default=False,
650 default=False,
648 )
651 )
649 coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
652 coreconfigitem('experimental', 'worker.wdir-get-thread-safe',
650 default=False,
653 default=False,
651 )
654 )
652 coreconfigitem('experimental', 'xdiff',
655 coreconfigitem('experimental', 'xdiff',
653 default=False,
656 default=False,
654 )
657 )
655 coreconfigitem('extensions', '.*',
658 coreconfigitem('extensions', '.*',
656 default=None,
659 default=None,
657 generic=True,
660 generic=True,
658 )
661 )
659 coreconfigitem('extdata', '.*',
662 coreconfigitem('extdata', '.*',
660 default=None,
663 default=None,
661 generic=True,
664 generic=True,
662 )
665 )
663 coreconfigitem('format', 'chunkcachesize',
666 coreconfigitem('format', 'chunkcachesize',
664 default=None,
667 default=None,
665 )
668 )
666 coreconfigitem('format', 'dotencode',
669 coreconfigitem('format', 'dotencode',
667 default=True,
670 default=True,
668 )
671 )
669 coreconfigitem('format', 'generaldelta',
672 coreconfigitem('format', 'generaldelta',
670 default=False,
673 default=False,
671 )
674 )
672 coreconfigitem('format', 'manifestcachesize',
675 coreconfigitem('format', 'manifestcachesize',
673 default=None,
676 default=None,
674 )
677 )
675 coreconfigitem('format', 'maxchainlen',
678 coreconfigitem('format', 'maxchainlen',
676 default=dynamicdefault,
679 default=dynamicdefault,
677 )
680 )
678 coreconfigitem('format', 'obsstore-version',
681 coreconfigitem('format', 'obsstore-version',
679 default=None,
682 default=None,
680 )
683 )
681 coreconfigitem('format', 'sparse-revlog',
684 coreconfigitem('format', 'sparse-revlog',
682 default=False,
685 default=False,
683 )
686 )
684 coreconfigitem('format', 'usefncache',
687 coreconfigitem('format', 'usefncache',
685 default=True,
688 default=True,
686 )
689 )
687 coreconfigitem('format', 'usegeneraldelta',
690 coreconfigitem('format', 'usegeneraldelta',
688 default=True,
691 default=True,
689 )
692 )
690 coreconfigitem('format', 'usestore',
693 coreconfigitem('format', 'usestore',
691 default=True,
694 default=True,
692 )
695 )
693 coreconfigitem('format', 'internal-phase',
696 coreconfigitem('format', 'internal-phase',
694 default=False,
697 default=False,
695 )
698 )
696 coreconfigitem('fsmonitor', 'warn_when_unused',
699 coreconfigitem('fsmonitor', 'warn_when_unused',
697 default=True,
700 default=True,
698 )
701 )
699 coreconfigitem('fsmonitor', 'warn_update_file_count',
702 coreconfigitem('fsmonitor', 'warn_update_file_count',
700 default=50000,
703 default=50000,
701 )
704 )
702 coreconfigitem('help', 'hidden-command\..*',
705 coreconfigitem('help', 'hidden-command\..*',
703 default=False,
706 default=False,
704 generic=True,
707 generic=True,
705 )
708 )
706 coreconfigitem('help', 'hidden-topic\..*',
709 coreconfigitem('help', 'hidden-topic\..*',
707 default=False,
710 default=False,
708 generic=True,
711 generic=True,
709 )
712 )
710 coreconfigitem('hooks', '.*',
713 coreconfigitem('hooks', '.*',
711 default=dynamicdefault,
714 default=dynamicdefault,
712 generic=True,
715 generic=True,
713 )
716 )
714 coreconfigitem('hgweb-paths', '.*',
717 coreconfigitem('hgweb-paths', '.*',
715 default=list,
718 default=list,
716 generic=True,
719 generic=True,
717 )
720 )
718 coreconfigitem('hostfingerprints', '.*',
721 coreconfigitem('hostfingerprints', '.*',
719 default=list,
722 default=list,
720 generic=True,
723 generic=True,
721 )
724 )
722 coreconfigitem('hostsecurity', 'ciphers',
725 coreconfigitem('hostsecurity', 'ciphers',
723 default=None,
726 default=None,
724 )
727 )
725 coreconfigitem('hostsecurity', 'disabletls10warning',
728 coreconfigitem('hostsecurity', 'disabletls10warning',
726 default=False,
729 default=False,
727 )
730 )
728 coreconfigitem('hostsecurity', 'minimumprotocol',
731 coreconfigitem('hostsecurity', 'minimumprotocol',
729 default=dynamicdefault,
732 default=dynamicdefault,
730 )
733 )
731 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
734 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
732 default=dynamicdefault,
735 default=dynamicdefault,
733 generic=True,
736 generic=True,
734 )
737 )
735 coreconfigitem('hostsecurity', '.*:ciphers$',
738 coreconfigitem('hostsecurity', '.*:ciphers$',
736 default=dynamicdefault,
739 default=dynamicdefault,
737 generic=True,
740 generic=True,
738 )
741 )
739 coreconfigitem('hostsecurity', '.*:fingerprints$',
742 coreconfigitem('hostsecurity', '.*:fingerprints$',
740 default=list,
743 default=list,
741 generic=True,
744 generic=True,
742 )
745 )
743 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
746 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
744 default=None,
747 default=None,
745 generic=True,
748 generic=True,
746 )
749 )
747
750
748 coreconfigitem('http_proxy', 'always',
751 coreconfigitem('http_proxy', 'always',
749 default=False,
752 default=False,
750 )
753 )
751 coreconfigitem('http_proxy', 'host',
754 coreconfigitem('http_proxy', 'host',
752 default=None,
755 default=None,
753 )
756 )
754 coreconfigitem('http_proxy', 'no',
757 coreconfigitem('http_proxy', 'no',
755 default=list,
758 default=list,
756 )
759 )
757 coreconfigitem('http_proxy', 'passwd',
760 coreconfigitem('http_proxy', 'passwd',
758 default=None,
761 default=None,
759 )
762 )
760 coreconfigitem('http_proxy', 'user',
763 coreconfigitem('http_proxy', 'user',
761 default=None,
764 default=None,
762 )
765 )
763
766
764 coreconfigitem('http', 'timeout',
767 coreconfigitem('http', 'timeout',
765 default=None,
768 default=None,
766 )
769 )
767
770
768 coreconfigitem('logtoprocess', 'commandexception',
771 coreconfigitem('logtoprocess', 'commandexception',
769 default=None,
772 default=None,
770 )
773 )
771 coreconfigitem('logtoprocess', 'commandfinish',
774 coreconfigitem('logtoprocess', 'commandfinish',
772 default=None,
775 default=None,
773 )
776 )
774 coreconfigitem('logtoprocess', 'command',
777 coreconfigitem('logtoprocess', 'command',
775 default=None,
778 default=None,
776 )
779 )
777 coreconfigitem('logtoprocess', 'develwarn',
780 coreconfigitem('logtoprocess', 'develwarn',
778 default=None,
781 default=None,
779 )
782 )
780 coreconfigitem('logtoprocess', 'uiblocked',
783 coreconfigitem('logtoprocess', 'uiblocked',
781 default=None,
784 default=None,
782 )
785 )
783 coreconfigitem('merge', 'checkunknown',
786 coreconfigitem('merge', 'checkunknown',
784 default='abort',
787 default='abort',
785 )
788 )
786 coreconfigitem('merge', 'checkignored',
789 coreconfigitem('merge', 'checkignored',
787 default='abort',
790 default='abort',
788 )
791 )
789 coreconfigitem('experimental', 'merge.checkpathconflicts',
792 coreconfigitem('experimental', 'merge.checkpathconflicts',
790 default=False,
793 default=False,
791 )
794 )
792 coreconfigitem('merge', 'followcopies',
795 coreconfigitem('merge', 'followcopies',
793 default=True,
796 default=True,
794 )
797 )
795 coreconfigitem('merge', 'on-failure',
798 coreconfigitem('merge', 'on-failure',
796 default='continue',
799 default='continue',
797 )
800 )
798 coreconfigitem('merge', 'preferancestor',
801 coreconfigitem('merge', 'preferancestor',
799 default=lambda: ['*'],
802 default=lambda: ['*'],
800 )
803 )
801 coreconfigitem('merge', 'strict-capability-check',
804 coreconfigitem('merge', 'strict-capability-check',
802 default=False,
805 default=False,
803 )
806 )
804 coreconfigitem('merge-tools', '.*',
807 coreconfigitem('merge-tools', '.*',
805 default=None,
808 default=None,
806 generic=True,
809 generic=True,
807 )
810 )
808 coreconfigitem('merge-tools', br'.*\.args$',
811 coreconfigitem('merge-tools', br'.*\.args$',
809 default="$local $base $other",
812 default="$local $base $other",
810 generic=True,
813 generic=True,
811 priority=-1,
814 priority=-1,
812 )
815 )
813 coreconfigitem('merge-tools', br'.*\.binary$',
816 coreconfigitem('merge-tools', br'.*\.binary$',
814 default=False,
817 default=False,
815 generic=True,
818 generic=True,
816 priority=-1,
819 priority=-1,
817 )
820 )
818 coreconfigitem('merge-tools', br'.*\.check$',
821 coreconfigitem('merge-tools', br'.*\.check$',
819 default=list,
822 default=list,
820 generic=True,
823 generic=True,
821 priority=-1,
824 priority=-1,
822 )
825 )
823 coreconfigitem('merge-tools', br'.*\.checkchanged$',
826 coreconfigitem('merge-tools', br'.*\.checkchanged$',
824 default=False,
827 default=False,
825 generic=True,
828 generic=True,
826 priority=-1,
829 priority=-1,
827 )
830 )
828 coreconfigitem('merge-tools', br'.*\.executable$',
831 coreconfigitem('merge-tools', br'.*\.executable$',
829 default=dynamicdefault,
832 default=dynamicdefault,
830 generic=True,
833 generic=True,
831 priority=-1,
834 priority=-1,
832 )
835 )
833 coreconfigitem('merge-tools', br'.*\.fixeol$',
836 coreconfigitem('merge-tools', br'.*\.fixeol$',
834 default=False,
837 default=False,
835 generic=True,
838 generic=True,
836 priority=-1,
839 priority=-1,
837 )
840 )
838 coreconfigitem('merge-tools', br'.*\.gui$',
841 coreconfigitem('merge-tools', br'.*\.gui$',
839 default=False,
842 default=False,
840 generic=True,
843 generic=True,
841 priority=-1,
844 priority=-1,
842 )
845 )
843 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
846 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
844 default='basic',
847 default='basic',
845 generic=True,
848 generic=True,
846 priority=-1,
849 priority=-1,
847 )
850 )
848 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
851 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
849 default=dynamicdefault, # take from ui.mergemarkertemplate
852 default=dynamicdefault, # take from ui.mergemarkertemplate
850 generic=True,
853 generic=True,
851 priority=-1,
854 priority=-1,
852 )
855 )
853 coreconfigitem('merge-tools', br'.*\.priority$',
856 coreconfigitem('merge-tools', br'.*\.priority$',
854 default=0,
857 default=0,
855 generic=True,
858 generic=True,
856 priority=-1,
859 priority=-1,
857 )
860 )
858 coreconfigitem('merge-tools', br'.*\.premerge$',
861 coreconfigitem('merge-tools', br'.*\.premerge$',
859 default=dynamicdefault,
862 default=dynamicdefault,
860 generic=True,
863 generic=True,
861 priority=-1,
864 priority=-1,
862 )
865 )
863 coreconfigitem('merge-tools', br'.*\.symlink$',
866 coreconfigitem('merge-tools', br'.*\.symlink$',
864 default=False,
867 default=False,
865 generic=True,
868 generic=True,
866 priority=-1,
869 priority=-1,
867 )
870 )
868 coreconfigitem('pager', 'attend-.*',
871 coreconfigitem('pager', 'attend-.*',
869 default=dynamicdefault,
872 default=dynamicdefault,
870 generic=True,
873 generic=True,
871 )
874 )
872 coreconfigitem('pager', 'ignore',
875 coreconfigitem('pager', 'ignore',
873 default=list,
876 default=list,
874 )
877 )
875 coreconfigitem('pager', 'pager',
878 coreconfigitem('pager', 'pager',
876 default=dynamicdefault,
879 default=dynamicdefault,
877 )
880 )
878 coreconfigitem('patch', 'eol',
881 coreconfigitem('patch', 'eol',
879 default='strict',
882 default='strict',
880 )
883 )
881 coreconfigitem('patch', 'fuzz',
884 coreconfigitem('patch', 'fuzz',
882 default=2,
885 default=2,
883 )
886 )
884 coreconfigitem('paths', 'default',
887 coreconfigitem('paths', 'default',
885 default=None,
888 default=None,
886 )
889 )
887 coreconfigitem('paths', 'default-push',
890 coreconfigitem('paths', 'default-push',
888 default=None,
891 default=None,
889 )
892 )
890 coreconfigitem('paths', '.*',
893 coreconfigitem('paths', '.*',
891 default=None,
894 default=None,
892 generic=True,
895 generic=True,
893 )
896 )
894 coreconfigitem('phases', 'checksubrepos',
897 coreconfigitem('phases', 'checksubrepos',
895 default='follow',
898 default='follow',
896 )
899 )
897 coreconfigitem('phases', 'new-commit',
900 coreconfigitem('phases', 'new-commit',
898 default='draft',
901 default='draft',
899 )
902 )
900 coreconfigitem('phases', 'publish',
903 coreconfigitem('phases', 'publish',
901 default=True,
904 default=True,
902 )
905 )
903 coreconfigitem('profiling', 'enabled',
906 coreconfigitem('profiling', 'enabled',
904 default=False,
907 default=False,
905 )
908 )
906 coreconfigitem('profiling', 'format',
909 coreconfigitem('profiling', 'format',
907 default='text',
910 default='text',
908 )
911 )
909 coreconfigitem('profiling', 'freq',
912 coreconfigitem('profiling', 'freq',
910 default=1000,
913 default=1000,
911 )
914 )
912 coreconfigitem('profiling', 'limit',
915 coreconfigitem('profiling', 'limit',
913 default=30,
916 default=30,
914 )
917 )
915 coreconfigitem('profiling', 'nested',
918 coreconfigitem('profiling', 'nested',
916 default=0,
919 default=0,
917 )
920 )
918 coreconfigitem('profiling', 'output',
921 coreconfigitem('profiling', 'output',
919 default=None,
922 default=None,
920 )
923 )
921 coreconfigitem('profiling', 'showmax',
924 coreconfigitem('profiling', 'showmax',
922 default=0.999,
925 default=0.999,
923 )
926 )
924 coreconfigitem('profiling', 'showmin',
927 coreconfigitem('profiling', 'showmin',
925 default=dynamicdefault,
928 default=dynamicdefault,
926 )
929 )
927 coreconfigitem('profiling', 'sort',
930 coreconfigitem('profiling', 'sort',
928 default='inlinetime',
931 default='inlinetime',
929 )
932 )
930 coreconfigitem('profiling', 'statformat',
933 coreconfigitem('profiling', 'statformat',
931 default='hotpath',
934 default='hotpath',
932 )
935 )
933 coreconfigitem('profiling', 'time-track',
936 coreconfigitem('profiling', 'time-track',
934 default='real',
937 default='real',
935 )
938 )
936 coreconfigitem('profiling', 'type',
939 coreconfigitem('profiling', 'type',
937 default='stat',
940 default='stat',
938 )
941 )
939 coreconfigitem('progress', 'assume-tty',
942 coreconfigitem('progress', 'assume-tty',
940 default=False,
943 default=False,
941 )
944 )
942 coreconfigitem('progress', 'changedelay',
945 coreconfigitem('progress', 'changedelay',
943 default=1,
946 default=1,
944 )
947 )
945 coreconfigitem('progress', 'clear-complete',
948 coreconfigitem('progress', 'clear-complete',
946 default=True,
949 default=True,
947 )
950 )
948 coreconfigitem('progress', 'debug',
951 coreconfigitem('progress', 'debug',
949 default=False,
952 default=False,
950 )
953 )
951 coreconfigitem('progress', 'delay',
954 coreconfigitem('progress', 'delay',
952 default=3,
955 default=3,
953 )
956 )
954 coreconfigitem('progress', 'disable',
957 coreconfigitem('progress', 'disable',
955 default=False,
958 default=False,
956 )
959 )
957 coreconfigitem('progress', 'estimateinterval',
960 coreconfigitem('progress', 'estimateinterval',
958 default=60.0,
961 default=60.0,
959 )
962 )
960 coreconfigitem('progress', 'format',
963 coreconfigitem('progress', 'format',
961 default=lambda: ['topic', 'bar', 'number', 'estimate'],
964 default=lambda: ['topic', 'bar', 'number', 'estimate'],
962 )
965 )
963 coreconfigitem('progress', 'refresh',
966 coreconfigitem('progress', 'refresh',
964 default=0.1,
967 default=0.1,
965 )
968 )
966 coreconfigitem('progress', 'width',
969 coreconfigitem('progress', 'width',
967 default=dynamicdefault,
970 default=dynamicdefault,
968 )
971 )
969 coreconfigitem('push', 'pushvars.server',
972 coreconfigitem('push', 'pushvars.server',
970 default=False,
973 default=False,
971 )
974 )
972 coreconfigitem('storage', 'new-repo-backend',
975 coreconfigitem('storage', 'new-repo-backend',
973 default='revlogv1',
976 default='revlogv1',
974 )
977 )
975 coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
978 coreconfigitem('storage', 'revlog.optimize-delta-parent-choice',
976 default=True,
979 default=True,
977 alias=[('format', 'aggressivemergedeltas')],
980 alias=[('format', 'aggressivemergedeltas')],
978 )
981 )
979 coreconfigitem('server', 'bookmarks-pushkey-compat',
982 coreconfigitem('server', 'bookmarks-pushkey-compat',
980 default=True,
983 default=True,
981 )
984 )
982 coreconfigitem('server', 'bundle1',
985 coreconfigitem('server', 'bundle1',
983 default=True,
986 default=True,
984 )
987 )
985 coreconfigitem('server', 'bundle1gd',
988 coreconfigitem('server', 'bundle1gd',
986 default=None,
989 default=None,
987 )
990 )
988 coreconfigitem('server', 'bundle1.pull',
991 coreconfigitem('server', 'bundle1.pull',
989 default=None,
992 default=None,
990 )
993 )
991 coreconfigitem('server', 'bundle1gd.pull',
994 coreconfigitem('server', 'bundle1gd.pull',
992 default=None,
995 default=None,
993 )
996 )
994 coreconfigitem('server', 'bundle1.push',
997 coreconfigitem('server', 'bundle1.push',
995 default=None,
998 default=None,
996 )
999 )
997 coreconfigitem('server', 'bundle1gd.push',
1000 coreconfigitem('server', 'bundle1gd.push',
998 default=None,
1001 default=None,
999 )
1002 )
1000 coreconfigitem('server', 'bundle2.stream',
1003 coreconfigitem('server', 'bundle2.stream',
1001 default=True,
1004 default=True,
1002 alias=[('experimental', 'bundle2.stream')]
1005 alias=[('experimental', 'bundle2.stream')]
1003 )
1006 )
1004 coreconfigitem('server', 'compressionengines',
1007 coreconfigitem('server', 'compressionengines',
1005 default=list,
1008 default=list,
1006 )
1009 )
1007 coreconfigitem('server', 'concurrent-push-mode',
1010 coreconfigitem('server', 'concurrent-push-mode',
1008 default='strict',
1011 default='strict',
1009 )
1012 )
1010 coreconfigitem('server', 'disablefullbundle',
1013 coreconfigitem('server', 'disablefullbundle',
1011 default=False,
1014 default=False,
1012 )
1015 )
1013 coreconfigitem('server', 'maxhttpheaderlen',
1016 coreconfigitem('server', 'maxhttpheaderlen',
1014 default=1024,
1017 default=1024,
1015 )
1018 )
1016 coreconfigitem('server', 'pullbundle',
1019 coreconfigitem('server', 'pullbundle',
1017 default=False,
1020 default=False,
1018 )
1021 )
1019 coreconfigitem('server', 'preferuncompressed',
1022 coreconfigitem('server', 'preferuncompressed',
1020 default=False,
1023 default=False,
1021 )
1024 )
1022 coreconfigitem('server', 'streamunbundle',
1025 coreconfigitem('server', 'streamunbundle',
1023 default=False,
1026 default=False,
1024 )
1027 )
1025 coreconfigitem('server', 'uncompressed',
1028 coreconfigitem('server', 'uncompressed',
1026 default=True,
1029 default=True,
1027 )
1030 )
1028 coreconfigitem('server', 'uncompressedallowsecret',
1031 coreconfigitem('server', 'uncompressedallowsecret',
1029 default=False,
1032 default=False,
1030 )
1033 )
1031 coreconfigitem('server', 'validate',
1034 coreconfigitem('server', 'validate',
1032 default=False,
1035 default=False,
1033 )
1036 )
1034 coreconfigitem('server', 'zliblevel',
1037 coreconfigitem('server', 'zliblevel',
1035 default=-1,
1038 default=-1,
1036 )
1039 )
1037 coreconfigitem('server', 'zstdlevel',
1040 coreconfigitem('server', 'zstdlevel',
1038 default=3,
1041 default=3,
1039 )
1042 )
1040 coreconfigitem('share', 'pool',
1043 coreconfigitem('share', 'pool',
1041 default=None,
1044 default=None,
1042 )
1045 )
1043 coreconfigitem('share', 'poolnaming',
1046 coreconfigitem('share', 'poolnaming',
1044 default='identity',
1047 default='identity',
1045 )
1048 )
1046 coreconfigitem('smtp', 'host',
1049 coreconfigitem('smtp', 'host',
1047 default=None,
1050 default=None,
1048 )
1051 )
1049 coreconfigitem('smtp', 'local_hostname',
1052 coreconfigitem('smtp', 'local_hostname',
1050 default=None,
1053 default=None,
1051 )
1054 )
1052 coreconfigitem('smtp', 'password',
1055 coreconfigitem('smtp', 'password',
1053 default=None,
1056 default=None,
1054 )
1057 )
1055 coreconfigitem('smtp', 'port',
1058 coreconfigitem('smtp', 'port',
1056 default=dynamicdefault,
1059 default=dynamicdefault,
1057 )
1060 )
1058 coreconfigitem('smtp', 'tls',
1061 coreconfigitem('smtp', 'tls',
1059 default='none',
1062 default='none',
1060 )
1063 )
1061 coreconfigitem('smtp', 'username',
1064 coreconfigitem('smtp', 'username',
1062 default=None,
1065 default=None,
1063 )
1066 )
1064 coreconfigitem('sparse', 'missingwarning',
1067 coreconfigitem('sparse', 'missingwarning',
1065 default=True,
1068 default=True,
1066 )
1069 )
1067 coreconfigitem('subrepos', 'allowed',
1070 coreconfigitem('subrepos', 'allowed',
1068 default=dynamicdefault, # to make backporting simpler
1071 default=dynamicdefault, # to make backporting simpler
1069 )
1072 )
1070 coreconfigitem('subrepos', 'hg:allowed',
1073 coreconfigitem('subrepos', 'hg:allowed',
1071 default=dynamicdefault,
1074 default=dynamicdefault,
1072 )
1075 )
1073 coreconfigitem('subrepos', 'git:allowed',
1076 coreconfigitem('subrepos', 'git:allowed',
1074 default=dynamicdefault,
1077 default=dynamicdefault,
1075 )
1078 )
1076 coreconfigitem('subrepos', 'svn:allowed',
1079 coreconfigitem('subrepos', 'svn:allowed',
1077 default=dynamicdefault,
1080 default=dynamicdefault,
1078 )
1081 )
1079 coreconfigitem('templates', '.*',
1082 coreconfigitem('templates', '.*',
1080 default=None,
1083 default=None,
1081 generic=True,
1084 generic=True,
1082 )
1085 )
1083 coreconfigitem('trusted', 'groups',
1086 coreconfigitem('trusted', 'groups',
1084 default=list,
1087 default=list,
1085 )
1088 )
1086 coreconfigitem('trusted', 'users',
1089 coreconfigitem('trusted', 'users',
1087 default=list,
1090 default=list,
1088 )
1091 )
1089 coreconfigitem('ui', '_usedassubrepo',
1092 coreconfigitem('ui', '_usedassubrepo',
1090 default=False,
1093 default=False,
1091 )
1094 )
1092 coreconfigitem('ui', 'allowemptycommit',
1095 coreconfigitem('ui', 'allowemptycommit',
1093 default=False,
1096 default=False,
1094 )
1097 )
1095 coreconfigitem('ui', 'archivemeta',
1098 coreconfigitem('ui', 'archivemeta',
1096 default=True,
1099 default=True,
1097 )
1100 )
1098 coreconfigitem('ui', 'askusername',
1101 coreconfigitem('ui', 'askusername',
1099 default=False,
1102 default=False,
1100 )
1103 )
1101 coreconfigitem('ui', 'clonebundlefallback',
1104 coreconfigitem('ui', 'clonebundlefallback',
1102 default=False,
1105 default=False,
1103 )
1106 )
1104 coreconfigitem('ui', 'clonebundleprefers',
1107 coreconfigitem('ui', 'clonebundleprefers',
1105 default=list,
1108 default=list,
1106 )
1109 )
1107 coreconfigitem('ui', 'clonebundles',
1110 coreconfigitem('ui', 'clonebundles',
1108 default=True,
1111 default=True,
1109 )
1112 )
1110 coreconfigitem('ui', 'color',
1113 coreconfigitem('ui', 'color',
1111 default='auto',
1114 default='auto',
1112 )
1115 )
1113 coreconfigitem('ui', 'commitsubrepos',
1116 coreconfigitem('ui', 'commitsubrepos',
1114 default=False,
1117 default=False,
1115 )
1118 )
1116 coreconfigitem('ui', 'debug',
1119 coreconfigitem('ui', 'debug',
1117 default=False,
1120 default=False,
1118 )
1121 )
1119 coreconfigitem('ui', 'debugger',
1122 coreconfigitem('ui', 'debugger',
1120 default=None,
1123 default=None,
1121 )
1124 )
1122 coreconfigitem('ui', 'editor',
1125 coreconfigitem('ui', 'editor',
1123 default=dynamicdefault,
1126 default=dynamicdefault,
1124 )
1127 )
1125 coreconfigitem('ui', 'fallbackencoding',
1128 coreconfigitem('ui', 'fallbackencoding',
1126 default=None,
1129 default=None,
1127 )
1130 )
1128 coreconfigitem('ui', 'forcecwd',
1131 coreconfigitem('ui', 'forcecwd',
1129 default=None,
1132 default=None,
1130 )
1133 )
1131 coreconfigitem('ui', 'forcemerge',
1134 coreconfigitem('ui', 'forcemerge',
1132 default=None,
1135 default=None,
1133 )
1136 )
1134 coreconfigitem('ui', 'formatdebug',
1137 coreconfigitem('ui', 'formatdebug',
1135 default=False,
1138 default=False,
1136 )
1139 )
1137 coreconfigitem('ui', 'formatjson',
1140 coreconfigitem('ui', 'formatjson',
1138 default=False,
1141 default=False,
1139 )
1142 )
1140 coreconfigitem('ui', 'formatted',
1143 coreconfigitem('ui', 'formatted',
1141 default=None,
1144 default=None,
1142 )
1145 )
1143 coreconfigitem('ui', 'graphnodetemplate',
1146 coreconfigitem('ui', 'graphnodetemplate',
1144 default=None,
1147 default=None,
1145 )
1148 )
1146 coreconfigitem('ui', 'history-editing-backup',
1149 coreconfigitem('ui', 'history-editing-backup',
1147 default=True,
1150 default=True,
1148 )
1151 )
1149 coreconfigitem('ui', 'interactive',
1152 coreconfigitem('ui', 'interactive',
1150 default=None,
1153 default=None,
1151 )
1154 )
1152 coreconfigitem('ui', 'interface',
1155 coreconfigitem('ui', 'interface',
1153 default=None,
1156 default=None,
1154 )
1157 )
1155 coreconfigitem('ui', 'interface.chunkselector',
1158 coreconfigitem('ui', 'interface.chunkselector',
1156 default=None,
1159 default=None,
1157 )
1160 )
1158 coreconfigitem('ui', 'large-file-limit',
1161 coreconfigitem('ui', 'large-file-limit',
1159 default=10000000,
1162 default=10000000,
1160 )
1163 )
1161 coreconfigitem('ui', 'logblockedtimes',
1164 coreconfigitem('ui', 'logblockedtimes',
1162 default=False,
1165 default=False,
1163 )
1166 )
1164 coreconfigitem('ui', 'logtemplate',
1167 coreconfigitem('ui', 'logtemplate',
1165 default=None,
1168 default=None,
1166 )
1169 )
1167 coreconfigitem('ui', 'merge',
1170 coreconfigitem('ui', 'merge',
1168 default=None,
1171 default=None,
1169 )
1172 )
1170 coreconfigitem('ui', 'mergemarkers',
1173 coreconfigitem('ui', 'mergemarkers',
1171 default='basic',
1174 default='basic',
1172 )
1175 )
1173 coreconfigitem('ui', 'mergemarkertemplate',
1176 coreconfigitem('ui', 'mergemarkertemplate',
1174 default=('{node|short} '
1177 default=('{node|short} '
1175 '{ifeq(tags, "tip", "", '
1178 '{ifeq(tags, "tip", "", '
1176 'ifeq(tags, "", "", "{tags} "))}'
1179 'ifeq(tags, "", "", "{tags} "))}'
1177 '{if(bookmarks, "{bookmarks} ")}'
1180 '{if(bookmarks, "{bookmarks} ")}'
1178 '{ifeq(branch, "default", "", "{branch} ")}'
1181 '{ifeq(branch, "default", "", "{branch} ")}'
1179 '- {author|user}: {desc|firstline}')
1182 '- {author|user}: {desc|firstline}')
1180 )
1183 )
1181 coreconfigitem('ui', 'nontty',
1184 coreconfigitem('ui', 'nontty',
1182 default=False,
1185 default=False,
1183 )
1186 )
1184 coreconfigitem('ui', 'origbackuppath',
1187 coreconfigitem('ui', 'origbackuppath',
1185 default=None,
1188 default=None,
1186 )
1189 )
1187 coreconfigitem('ui', 'paginate',
1190 coreconfigitem('ui', 'paginate',
1188 default=True,
1191 default=True,
1189 )
1192 )
1190 coreconfigitem('ui', 'patch',
1193 coreconfigitem('ui', 'patch',
1191 default=None,
1194 default=None,
1192 )
1195 )
1193 coreconfigitem('ui', 'portablefilenames',
1196 coreconfigitem('ui', 'portablefilenames',
1194 default='warn',
1197 default='warn',
1195 )
1198 )
1196 coreconfigitem('ui', 'promptecho',
1199 coreconfigitem('ui', 'promptecho',
1197 default=False,
1200 default=False,
1198 )
1201 )
1199 coreconfigitem('ui', 'quiet',
1202 coreconfigitem('ui', 'quiet',
1200 default=False,
1203 default=False,
1201 )
1204 )
1202 coreconfigitem('ui', 'quietbookmarkmove',
1205 coreconfigitem('ui', 'quietbookmarkmove',
1203 default=False,
1206 default=False,
1204 )
1207 )
1205 coreconfigitem('ui', 'remotecmd',
1208 coreconfigitem('ui', 'remotecmd',
1206 default='hg',
1209 default='hg',
1207 )
1210 )
1208 coreconfigitem('ui', 'report_untrusted',
1211 coreconfigitem('ui', 'report_untrusted',
1209 default=True,
1212 default=True,
1210 )
1213 )
1211 coreconfigitem('ui', 'rollback',
1214 coreconfigitem('ui', 'rollback',
1212 default=True,
1215 default=True,
1213 )
1216 )
1214 coreconfigitem('ui', 'signal-safe-lock',
1217 coreconfigitem('ui', 'signal-safe-lock',
1215 default=True,
1218 default=True,
1216 )
1219 )
1217 coreconfigitem('ui', 'slash',
1220 coreconfigitem('ui', 'slash',
1218 default=False,
1221 default=False,
1219 )
1222 )
1220 coreconfigitem('ui', 'ssh',
1223 coreconfigitem('ui', 'ssh',
1221 default='ssh',
1224 default='ssh',
1222 )
1225 )
1223 coreconfigitem('ui', 'ssherrorhint',
1226 coreconfigitem('ui', 'ssherrorhint',
1224 default=None,
1227 default=None,
1225 )
1228 )
1226 coreconfigitem('ui', 'statuscopies',
1229 coreconfigitem('ui', 'statuscopies',
1227 default=False,
1230 default=False,
1228 )
1231 )
1229 coreconfigitem('ui', 'strict',
1232 coreconfigitem('ui', 'strict',
1230 default=False,
1233 default=False,
1231 )
1234 )
1232 coreconfigitem('ui', 'style',
1235 coreconfigitem('ui', 'style',
1233 default='',
1236 default='',
1234 )
1237 )
1235 coreconfigitem('ui', 'supportcontact',
1238 coreconfigitem('ui', 'supportcontact',
1236 default=None,
1239 default=None,
1237 )
1240 )
1238 coreconfigitem('ui', 'textwidth',
1241 coreconfigitem('ui', 'textwidth',
1239 default=78,
1242 default=78,
1240 )
1243 )
1241 coreconfigitem('ui', 'timeout',
1244 coreconfigitem('ui', 'timeout',
1242 default='600',
1245 default='600',
1243 )
1246 )
1244 coreconfigitem('ui', 'timeout.warn',
1247 coreconfigitem('ui', 'timeout.warn',
1245 default=0,
1248 default=0,
1246 )
1249 )
1247 coreconfigitem('ui', 'traceback',
1250 coreconfigitem('ui', 'traceback',
1248 default=False,
1251 default=False,
1249 )
1252 )
1250 coreconfigitem('ui', 'tweakdefaults',
1253 coreconfigitem('ui', 'tweakdefaults',
1251 default=False,
1254 default=False,
1252 )
1255 )
1253 coreconfigitem('ui', 'username',
1256 coreconfigitem('ui', 'username',
1254 alias=[('ui', 'user')]
1257 alias=[('ui', 'user')]
1255 )
1258 )
1256 coreconfigitem('ui', 'verbose',
1259 coreconfigitem('ui', 'verbose',
1257 default=False,
1260 default=False,
1258 )
1261 )
1259 coreconfigitem('verify', 'skipflags',
1262 coreconfigitem('verify', 'skipflags',
1260 default=None,
1263 default=None,
1261 )
1264 )
1262 coreconfigitem('web', 'allowbz2',
1265 coreconfigitem('web', 'allowbz2',
1263 default=False,
1266 default=False,
1264 )
1267 )
1265 coreconfigitem('web', 'allowgz',
1268 coreconfigitem('web', 'allowgz',
1266 default=False,
1269 default=False,
1267 )
1270 )
1268 coreconfigitem('web', 'allow-pull',
1271 coreconfigitem('web', 'allow-pull',
1269 alias=[('web', 'allowpull')],
1272 alias=[('web', 'allowpull')],
1270 default=True,
1273 default=True,
1271 )
1274 )
1272 coreconfigitem('web', 'allow-push',
1275 coreconfigitem('web', 'allow-push',
1273 alias=[('web', 'allow_push')],
1276 alias=[('web', 'allow_push')],
1274 default=list,
1277 default=list,
1275 )
1278 )
1276 coreconfigitem('web', 'allowzip',
1279 coreconfigitem('web', 'allowzip',
1277 default=False,
1280 default=False,
1278 )
1281 )
1279 coreconfigitem('web', 'archivesubrepos',
1282 coreconfigitem('web', 'archivesubrepos',
1280 default=False,
1283 default=False,
1281 )
1284 )
1282 coreconfigitem('web', 'cache',
1285 coreconfigitem('web', 'cache',
1283 default=True,
1286 default=True,
1284 )
1287 )
1285 coreconfigitem('web', 'contact',
1288 coreconfigitem('web', 'contact',
1286 default=None,
1289 default=None,
1287 )
1290 )
1288 coreconfigitem('web', 'deny_push',
1291 coreconfigitem('web', 'deny_push',
1289 default=list,
1292 default=list,
1290 )
1293 )
1291 coreconfigitem('web', 'guessmime',
1294 coreconfigitem('web', 'guessmime',
1292 default=False,
1295 default=False,
1293 )
1296 )
1294 coreconfigitem('web', 'hidden',
1297 coreconfigitem('web', 'hidden',
1295 default=False,
1298 default=False,
1296 )
1299 )
1297 coreconfigitem('web', 'labels',
1300 coreconfigitem('web', 'labels',
1298 default=list,
1301 default=list,
1299 )
1302 )
1300 coreconfigitem('web', 'logoimg',
1303 coreconfigitem('web', 'logoimg',
1301 default='hglogo.png',
1304 default='hglogo.png',
1302 )
1305 )
1303 coreconfigitem('web', 'logourl',
1306 coreconfigitem('web', 'logourl',
1304 default='https://mercurial-scm.org/',
1307 default='https://mercurial-scm.org/',
1305 )
1308 )
1306 coreconfigitem('web', 'accesslog',
1309 coreconfigitem('web', 'accesslog',
1307 default='-',
1310 default='-',
1308 )
1311 )
1309 coreconfigitem('web', 'address',
1312 coreconfigitem('web', 'address',
1310 default='',
1313 default='',
1311 )
1314 )
1312 coreconfigitem('web', 'allow-archive',
1315 coreconfigitem('web', 'allow-archive',
1313 alias=[('web', 'allow_archive')],
1316 alias=[('web', 'allow_archive')],
1314 default=list,
1317 default=list,
1315 )
1318 )
1316 coreconfigitem('web', 'allow_read',
1319 coreconfigitem('web', 'allow_read',
1317 default=list,
1320 default=list,
1318 )
1321 )
1319 coreconfigitem('web', 'baseurl',
1322 coreconfigitem('web', 'baseurl',
1320 default=None,
1323 default=None,
1321 )
1324 )
1322 coreconfigitem('web', 'cacerts',
1325 coreconfigitem('web', 'cacerts',
1323 default=None,
1326 default=None,
1324 )
1327 )
1325 coreconfigitem('web', 'certificate',
1328 coreconfigitem('web', 'certificate',
1326 default=None,
1329 default=None,
1327 )
1330 )
1328 coreconfigitem('web', 'collapse',
1331 coreconfigitem('web', 'collapse',
1329 default=False,
1332 default=False,
1330 )
1333 )
1331 coreconfigitem('web', 'csp',
1334 coreconfigitem('web', 'csp',
1332 default=None,
1335 default=None,
1333 )
1336 )
1334 coreconfigitem('web', 'deny_read',
1337 coreconfigitem('web', 'deny_read',
1335 default=list,
1338 default=list,
1336 )
1339 )
1337 coreconfigitem('web', 'descend',
1340 coreconfigitem('web', 'descend',
1338 default=True,
1341 default=True,
1339 )
1342 )
1340 coreconfigitem('web', 'description',
1343 coreconfigitem('web', 'description',
1341 default="",
1344 default="",
1342 )
1345 )
1343 coreconfigitem('web', 'encoding',
1346 coreconfigitem('web', 'encoding',
1344 default=lambda: encoding.encoding,
1347 default=lambda: encoding.encoding,
1345 )
1348 )
1346 coreconfigitem('web', 'errorlog',
1349 coreconfigitem('web', 'errorlog',
1347 default='-',
1350 default='-',
1348 )
1351 )
1349 coreconfigitem('web', 'ipv6',
1352 coreconfigitem('web', 'ipv6',
1350 default=False,
1353 default=False,
1351 )
1354 )
1352 coreconfigitem('web', 'maxchanges',
1355 coreconfigitem('web', 'maxchanges',
1353 default=10,
1356 default=10,
1354 )
1357 )
1355 coreconfigitem('web', 'maxfiles',
1358 coreconfigitem('web', 'maxfiles',
1356 default=10,
1359 default=10,
1357 )
1360 )
1358 coreconfigitem('web', 'maxshortchanges',
1361 coreconfigitem('web', 'maxshortchanges',
1359 default=60,
1362 default=60,
1360 )
1363 )
1361 coreconfigitem('web', 'motd',
1364 coreconfigitem('web', 'motd',
1362 default='',
1365 default='',
1363 )
1366 )
1364 coreconfigitem('web', 'name',
1367 coreconfigitem('web', 'name',
1365 default=dynamicdefault,
1368 default=dynamicdefault,
1366 )
1369 )
1367 coreconfigitem('web', 'port',
1370 coreconfigitem('web', 'port',
1368 default=8000,
1371 default=8000,
1369 )
1372 )
1370 coreconfigitem('web', 'prefix',
1373 coreconfigitem('web', 'prefix',
1371 default='',
1374 default='',
1372 )
1375 )
1373 coreconfigitem('web', 'push_ssl',
1376 coreconfigitem('web', 'push_ssl',
1374 default=True,
1377 default=True,
1375 )
1378 )
1376 coreconfigitem('web', 'refreshinterval',
1379 coreconfigitem('web', 'refreshinterval',
1377 default=20,
1380 default=20,
1378 )
1381 )
1379 coreconfigitem('web', 'server-header',
1382 coreconfigitem('web', 'server-header',
1380 default=None,
1383 default=None,
1381 )
1384 )
1382 coreconfigitem('web', 'static',
1385 coreconfigitem('web', 'static',
1383 default=None,
1386 default=None,
1384 )
1387 )
1385 coreconfigitem('web', 'staticurl',
1388 coreconfigitem('web', 'staticurl',
1386 default=None,
1389 default=None,
1387 )
1390 )
1388 coreconfigitem('web', 'stripes',
1391 coreconfigitem('web', 'stripes',
1389 default=1,
1392 default=1,
1390 )
1393 )
1391 coreconfigitem('web', 'style',
1394 coreconfigitem('web', 'style',
1392 default='paper',
1395 default='paper',
1393 )
1396 )
1394 coreconfigitem('web', 'templates',
1397 coreconfigitem('web', 'templates',
1395 default=None,
1398 default=None,
1396 )
1399 )
1397 coreconfigitem('web', 'view',
1400 coreconfigitem('web', 'view',
1398 default='served',
1401 default='served',
1399 )
1402 )
1400 coreconfigitem('worker', 'backgroundclose',
1403 coreconfigitem('worker', 'backgroundclose',
1401 default=dynamicdefault,
1404 default=dynamicdefault,
1402 )
1405 )
1403 # Windows defaults to a limit of 512 open files. A buffer of 128
1406 # Windows defaults to a limit of 512 open files. A buffer of 128
1404 # should give us enough headway.
1407 # should give us enough headway.
1405 coreconfigitem('worker', 'backgroundclosemaxqueue',
1408 coreconfigitem('worker', 'backgroundclosemaxqueue',
1406 default=384,
1409 default=384,
1407 )
1410 )
1408 coreconfigitem('worker', 'backgroundcloseminfilecount',
1411 coreconfigitem('worker', 'backgroundcloseminfilecount',
1409 default=2048,
1412 default=2048,
1410 )
1413 )
1411 coreconfigitem('worker', 'backgroundclosethreadcount',
1414 coreconfigitem('worker', 'backgroundclosethreadcount',
1412 default=4,
1415 default=4,
1413 )
1416 )
1414 coreconfigitem('worker', 'enabled',
1417 coreconfigitem('worker', 'enabled',
1415 default=True,
1418 default=True,
1416 )
1419 )
1417 coreconfigitem('worker', 'numcpus',
1420 coreconfigitem('worker', 'numcpus',
1418 default=None,
1421 default=None,
1419 )
1422 )
1420
1423
1421 # Rebase related configuration moved to core because other extension are doing
1424 # Rebase related configuration moved to core because other extension are doing
1422 # strange things. For example, shelve import the extensions to reuse some bit
1425 # strange things. For example, shelve import the extensions to reuse some bit
1423 # without formally loading it.
1426 # without formally loading it.
1424 coreconfigitem('commands', 'rebase.requiredest',
1427 coreconfigitem('commands', 'rebase.requiredest',
1425 default=False,
1428 default=False,
1426 )
1429 )
1427 coreconfigitem('experimental', 'rebaseskipobsolete',
1430 coreconfigitem('experimental', 'rebaseskipobsolete',
1428 default=True,
1431 default=True,
1429 )
1432 )
1430 coreconfigitem('rebase', 'singletransaction',
1433 coreconfigitem('rebase', 'singletransaction',
1431 default=False,
1434 default=False,
1432 )
1435 )
1433 coreconfigitem('rebase', 'experimental.inmemory',
1436 coreconfigitem('rebase', 'experimental.inmemory',
1434 default=False,
1437 default=False,
1435 )
1438 )
@@ -1,1857 +1,1858
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 )
13 )
14 from .utils import (
14 from .utils import (
15 interfaceutil,
15 interfaceutil,
16 )
16 )
17
17
18 # When narrowing is finalized and no longer subject to format changes,
18 # When narrowing is finalized and no longer subject to format changes,
19 # we should move this to just "narrow" or similar.
19 # we should move this to just "narrow" or similar.
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21
21
22 # Local repository feature string.
22 # Local repository feature string.
23
23
24 # Revlogs are being used for file storage.
24 # Revlogs are being used for file storage.
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 # The storage part of the repository is shared from an external source.
26 # The storage part of the repository is shared from an external source.
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 # LFS supported for backing file storage.
28 # LFS supported for backing file storage.
29 REPO_FEATURE_LFS = b'lfs'
29 REPO_FEATURE_LFS = b'lfs'
30 # Repository supports being stream cloned.
30 # Repository supports being stream cloned.
31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
32 # Files storage may lack data for all ancestors.
32 # Files storage may lack data for all ancestors.
33 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
33 REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'
34
34
35 REVISION_FLAG_CENSORED = 1 << 15
35 REVISION_FLAG_CENSORED = 1 << 15
36 REVISION_FLAG_ELLIPSIS = 1 << 14
36 REVISION_FLAG_ELLIPSIS = 1 << 14
37 REVISION_FLAG_EXTSTORED = 1 << 13
37 REVISION_FLAG_EXTSTORED = 1 << 13
38
38
39 REVISION_FLAGS_KNOWN = (
39 REVISION_FLAGS_KNOWN = (
40 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
40 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
41
41
42 CG_DELTAMODE_STD = b'default'
42 CG_DELTAMODE_STD = b'default'
43 CG_DELTAMODE_PREV = b'previous'
43 CG_DELTAMODE_PREV = b'previous'
44 CG_DELTAMODE_FULL = b'fulltext'
44 CG_DELTAMODE_FULL = b'fulltext'
45 CG_DELTAMODE_P1 = b'p1'
45
46
46 class ipeerconnection(interfaceutil.Interface):
47 class ipeerconnection(interfaceutil.Interface):
47 """Represents a "connection" to a repository.
48 """Represents a "connection" to a repository.
48
49
49 This is the base interface for representing a connection to a repository.
50 This is the base interface for representing a connection to a repository.
50 It holds basic properties and methods applicable to all peer types.
51 It holds basic properties and methods applicable to all peer types.
51
52
52 This is not a complete interface definition and should not be used
53 This is not a complete interface definition and should not be used
53 outside of this module.
54 outside of this module.
54 """
55 """
55 ui = interfaceutil.Attribute("""ui.ui instance""")
56 ui = interfaceutil.Attribute("""ui.ui instance""")
56
57
57 def url():
58 def url():
58 """Returns a URL string representing this peer.
59 """Returns a URL string representing this peer.
59
60
60 Currently, implementations expose the raw URL used to construct the
61 Currently, implementations expose the raw URL used to construct the
61 instance. It may contain credentials as part of the URL. The
62 instance. It may contain credentials as part of the URL. The
62 expectations of the value aren't well-defined and this could lead to
63 expectations of the value aren't well-defined and this could lead to
63 data leakage.
64 data leakage.
64
65
65 TODO audit/clean consumers and more clearly define the contents of this
66 TODO audit/clean consumers and more clearly define the contents of this
66 value.
67 value.
67 """
68 """
68
69
69 def local():
70 def local():
70 """Returns a local repository instance.
71 """Returns a local repository instance.
71
72
72 If the peer represents a local repository, returns an object that
73 If the peer represents a local repository, returns an object that
73 can be used to interface with it. Otherwise returns ``None``.
74 can be used to interface with it. Otherwise returns ``None``.
74 """
75 """
75
76
76 def peer():
77 def peer():
77 """Returns an object conforming to this interface.
78 """Returns an object conforming to this interface.
78
79
79 Most implementations will ``return self``.
80 Most implementations will ``return self``.
80 """
81 """
81
82
82 def canpush():
83 def canpush():
83 """Returns a boolean indicating if this peer can be pushed to."""
84 """Returns a boolean indicating if this peer can be pushed to."""
84
85
85 def close():
86 def close():
86 """Close the connection to this peer.
87 """Close the connection to this peer.
87
88
88 This is called when the peer will no longer be used. Resources
89 This is called when the peer will no longer be used. Resources
89 associated with the peer should be cleaned up.
90 associated with the peer should be cleaned up.
90 """
91 """
91
92
92 class ipeercapabilities(interfaceutil.Interface):
93 class ipeercapabilities(interfaceutil.Interface):
93 """Peer sub-interface related to capabilities."""
94 """Peer sub-interface related to capabilities."""
94
95
95 def capable(name):
96 def capable(name):
96 """Determine support for a named capability.
97 """Determine support for a named capability.
97
98
98 Returns ``False`` if capability not supported.
99 Returns ``False`` if capability not supported.
99
100
100 Returns ``True`` if boolean capability is supported. Returns a string
101 Returns ``True`` if boolean capability is supported. Returns a string
101 if capability support is non-boolean.
102 if capability support is non-boolean.
102
103
103 Capability strings may or may not map to wire protocol capabilities.
104 Capability strings may or may not map to wire protocol capabilities.
104 """
105 """
105
106
106 def requirecap(name, purpose):
107 def requirecap(name, purpose):
107 """Require a capability to be present.
108 """Require a capability to be present.
108
109
109 Raises a ``CapabilityError`` if the capability isn't present.
110 Raises a ``CapabilityError`` if the capability isn't present.
110 """
111 """
111
112
112 class ipeercommands(interfaceutil.Interface):
113 class ipeercommands(interfaceutil.Interface):
113 """Client-side interface for communicating over the wire protocol.
114 """Client-side interface for communicating over the wire protocol.
114
115
115 This interface is used as a gateway to the Mercurial wire protocol.
116 This interface is used as a gateway to the Mercurial wire protocol.
116 methods commonly call wire protocol commands of the same name.
117 methods commonly call wire protocol commands of the same name.
117 """
118 """
118
119
119 def branchmap():
120 def branchmap():
120 """Obtain heads in named branches.
121 """Obtain heads in named branches.
121
122
122 Returns a dict mapping branch name to an iterable of nodes that are
123 Returns a dict mapping branch name to an iterable of nodes that are
123 heads on that branch.
124 heads on that branch.
124 """
125 """
125
126
126 def capabilities():
127 def capabilities():
127 """Obtain capabilities of the peer.
128 """Obtain capabilities of the peer.
128
129
129 Returns a set of string capabilities.
130 Returns a set of string capabilities.
130 """
131 """
131
132
132 def clonebundles():
133 def clonebundles():
133 """Obtains the clone bundles manifest for the repo.
134 """Obtains the clone bundles manifest for the repo.
134
135
135 Returns the manifest as unparsed bytes.
136 Returns the manifest as unparsed bytes.
136 """
137 """
137
138
138 def debugwireargs(one, two, three=None, four=None, five=None):
139 def debugwireargs(one, two, three=None, four=None, five=None):
139 """Used to facilitate debugging of arguments passed over the wire."""
140 """Used to facilitate debugging of arguments passed over the wire."""
140
141
141 def getbundle(source, **kwargs):
142 def getbundle(source, **kwargs):
142 """Obtain remote repository data as a bundle.
143 """Obtain remote repository data as a bundle.
143
144
144 This command is how the bulk of repository data is transferred from
145 This command is how the bulk of repository data is transferred from
145 the peer to the local repository
146 the peer to the local repository
146
147
147 Returns a generator of bundle data.
148 Returns a generator of bundle data.
148 """
149 """
149
150
150 def heads():
151 def heads():
151 """Determine all known head revisions in the peer.
152 """Determine all known head revisions in the peer.
152
153
153 Returns an iterable of binary nodes.
154 Returns an iterable of binary nodes.
154 """
155 """
155
156
156 def known(nodes):
157 def known(nodes):
157 """Determine whether multiple nodes are known.
158 """Determine whether multiple nodes are known.
158
159
159 Accepts an iterable of nodes whose presence to check for.
160 Accepts an iterable of nodes whose presence to check for.
160
161
161 Returns an iterable of booleans indicating of the corresponding node
162 Returns an iterable of booleans indicating of the corresponding node
162 at that index is known to the peer.
163 at that index is known to the peer.
163 """
164 """
164
165
165 def listkeys(namespace):
166 def listkeys(namespace):
166 """Obtain all keys in a pushkey namespace.
167 """Obtain all keys in a pushkey namespace.
167
168
168 Returns an iterable of key names.
169 Returns an iterable of key names.
169 """
170 """
170
171
171 def lookup(key):
172 def lookup(key):
172 """Resolve a value to a known revision.
173 """Resolve a value to a known revision.
173
174
174 Returns a binary node of the resolved revision on success.
175 Returns a binary node of the resolved revision on success.
175 """
176 """
176
177
177 def pushkey(namespace, key, old, new):
178 def pushkey(namespace, key, old, new):
178 """Set a value using the ``pushkey`` protocol.
179 """Set a value using the ``pushkey`` protocol.
179
180
180 Arguments correspond to the pushkey namespace and key to operate on and
181 Arguments correspond to the pushkey namespace and key to operate on and
181 the old and new values for that key.
182 the old and new values for that key.
182
183
183 Returns a string with the peer result. The value inside varies by the
184 Returns a string with the peer result. The value inside varies by the
184 namespace.
185 namespace.
185 """
186 """
186
187
187 def stream_out():
188 def stream_out():
188 """Obtain streaming clone data.
189 """Obtain streaming clone data.
189
190
190 Successful result should be a generator of data chunks.
191 Successful result should be a generator of data chunks.
191 """
192 """
192
193
193 def unbundle(bundle, heads, url):
194 def unbundle(bundle, heads, url):
194 """Transfer repository data to the peer.
195 """Transfer repository data to the peer.
195
196
196 This is how the bulk of data during a push is transferred.
197 This is how the bulk of data during a push is transferred.
197
198
198 Returns the integer number of heads added to the peer.
199 Returns the integer number of heads added to the peer.
199 """
200 """
200
201
201 class ipeerlegacycommands(interfaceutil.Interface):
202 class ipeerlegacycommands(interfaceutil.Interface):
202 """Interface for implementing support for legacy wire protocol commands.
203 """Interface for implementing support for legacy wire protocol commands.
203
204
204 Wire protocol commands transition to legacy status when they are no longer
205 Wire protocol commands transition to legacy status when they are no longer
205 used by modern clients. To facilitate identifying which commands are
206 used by modern clients. To facilitate identifying which commands are
206 legacy, the interfaces are split.
207 legacy, the interfaces are split.
207 """
208 """
208
209
209 def between(pairs):
210 def between(pairs):
210 """Obtain nodes between pairs of nodes.
211 """Obtain nodes between pairs of nodes.
211
212
212 ``pairs`` is an iterable of node pairs.
213 ``pairs`` is an iterable of node pairs.
213
214
214 Returns an iterable of iterables of nodes corresponding to each
215 Returns an iterable of iterables of nodes corresponding to each
215 requested pair.
216 requested pair.
216 """
217 """
217
218
218 def branches(nodes):
219 def branches(nodes):
219 """Obtain ancestor changesets of specific nodes back to a branch point.
220 """Obtain ancestor changesets of specific nodes back to a branch point.
220
221
221 For each requested node, the peer finds the first ancestor node that is
222 For each requested node, the peer finds the first ancestor node that is
222 a DAG root or is a merge.
223 a DAG root or is a merge.
223
224
224 Returns an iterable of iterables with the resolved values for each node.
225 Returns an iterable of iterables with the resolved values for each node.
225 """
226 """
226
227
227 def changegroup(nodes, source):
228 def changegroup(nodes, source):
228 """Obtain a changegroup with data for descendants of specified nodes."""
229 """Obtain a changegroup with data for descendants of specified nodes."""
229
230
230 def changegroupsubset(bases, heads, source):
231 def changegroupsubset(bases, heads, source):
231 pass
232 pass
232
233
233 class ipeercommandexecutor(interfaceutil.Interface):
234 class ipeercommandexecutor(interfaceutil.Interface):
234 """Represents a mechanism to execute remote commands.
235 """Represents a mechanism to execute remote commands.
235
236
236 This is the primary interface for requesting that wire protocol commands
237 This is the primary interface for requesting that wire protocol commands
237 be executed. Instances of this interface are active in a context manager
238 be executed. Instances of this interface are active in a context manager
238 and have a well-defined lifetime. When the context manager exits, all
239 and have a well-defined lifetime. When the context manager exits, all
239 outstanding requests are waited on.
240 outstanding requests are waited on.
240 """
241 """
241
242
242 def callcommand(name, args):
243 def callcommand(name, args):
243 """Request that a named command be executed.
244 """Request that a named command be executed.
244
245
245 Receives the command name and a dictionary of command arguments.
246 Receives the command name and a dictionary of command arguments.
246
247
247 Returns a ``concurrent.futures.Future`` that will resolve to the
248 Returns a ``concurrent.futures.Future`` that will resolve to the
248 result of that command request. That exact value is left up to
249 result of that command request. That exact value is left up to
249 the implementation and possibly varies by command.
250 the implementation and possibly varies by command.
250
251
251 Not all commands can coexist with other commands in an executor
252 Not all commands can coexist with other commands in an executor
252 instance: it depends on the underlying wire protocol transport being
253 instance: it depends on the underlying wire protocol transport being
253 used and the command itself.
254 used and the command itself.
254
255
255 Implementations MAY call ``sendcommands()`` automatically if the
256 Implementations MAY call ``sendcommands()`` automatically if the
256 requested command can not coexist with other commands in this executor.
257 requested command can not coexist with other commands in this executor.
257
258
258 Implementations MAY call ``sendcommands()`` automatically when the
259 Implementations MAY call ``sendcommands()`` automatically when the
259 future's ``result()`` is called. So, consumers using multiple
260 future's ``result()`` is called. So, consumers using multiple
260 commands with an executor MUST ensure that ``result()`` is not called
261 commands with an executor MUST ensure that ``result()`` is not called
261 until all command requests have been issued.
262 until all command requests have been issued.
262 """
263 """
263
264
264 def sendcommands():
265 def sendcommands():
265 """Trigger submission of queued command requests.
266 """Trigger submission of queued command requests.
266
267
267 Not all transports submit commands as soon as they are requested to
268 Not all transports submit commands as soon as they are requested to
268 run. When called, this method forces queued command requests to be
269 run. When called, this method forces queued command requests to be
269 issued. It will no-op if all commands have already been sent.
270 issued. It will no-op if all commands have already been sent.
270
271
271 When called, no more new commands may be issued with this executor.
272 When called, no more new commands may be issued with this executor.
272 """
273 """
273
274
274 def close():
275 def close():
275 """Signal that this command request is finished.
276 """Signal that this command request is finished.
276
277
277 When called, no more new commands may be issued. All outstanding
278 When called, no more new commands may be issued. All outstanding
278 commands that have previously been issued are waited on before
279 commands that have previously been issued are waited on before
279 returning. This not only includes waiting for the futures to resolve,
280 returning. This not only includes waiting for the futures to resolve,
280 but also waiting for all response data to arrive. In other words,
281 but also waiting for all response data to arrive. In other words,
281 calling this waits for all on-wire state for issued command requests
282 calling this waits for all on-wire state for issued command requests
282 to finish.
283 to finish.
283
284
284 When used as a context manager, this method is called when exiting the
285 When used as a context manager, this method is called when exiting the
285 context manager.
286 context manager.
286
287
287 This method may call ``sendcommands()`` if there are buffered commands.
288 This method may call ``sendcommands()`` if there are buffered commands.
288 """
289 """
289
290
290 class ipeerrequests(interfaceutil.Interface):
291 class ipeerrequests(interfaceutil.Interface):
291 """Interface for executing commands on a peer."""
292 """Interface for executing commands on a peer."""
292
293
293 def commandexecutor():
294 def commandexecutor():
294 """A context manager that resolves to an ipeercommandexecutor.
295 """A context manager that resolves to an ipeercommandexecutor.
295
296
296 The object this resolves to can be used to issue command requests
297 The object this resolves to can be used to issue command requests
297 to the peer.
298 to the peer.
298
299
299 Callers should call its ``callcommand`` method to issue command
300 Callers should call its ``callcommand`` method to issue command
300 requests.
301 requests.
301
302
302 A new executor should be obtained for each distinct set of commands
303 A new executor should be obtained for each distinct set of commands
303 (possibly just a single command) that the consumer wants to execute
304 (possibly just a single command) that the consumer wants to execute
304 as part of a single operation or round trip. This is because some
305 as part of a single operation or round trip. This is because some
305 peers are half-duplex and/or don't support persistent connections.
306 peers are half-duplex and/or don't support persistent connections.
306 e.g. in the case of HTTP peers, commands sent to an executor represent
307 e.g. in the case of HTTP peers, commands sent to an executor represent
307 a single HTTP request. While some peers may support multiple command
308 a single HTTP request. While some peers may support multiple command
308 sends over the wire per executor, consumers need to code to the least
309 sends over the wire per executor, consumers need to code to the least
309 capable peer. So it should be assumed that command executors buffer
310 capable peer. So it should be assumed that command executors buffer
310 called commands until they are told to send them and that each
311 called commands until they are told to send them and that each
311 command executor could result in a new connection or wire-level request
312 command executor could result in a new connection or wire-level request
312 being issued.
313 being issued.
313 """
314 """
314
315
315 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
316 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
316 """Unified interface for peer repositories.
317 """Unified interface for peer repositories.
317
318
318 All peer instances must conform to this interface.
319 All peer instances must conform to this interface.
319 """
320 """
320
321
321 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
322 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
322 """Unified peer interface for wire protocol version 2 peers."""
323 """Unified peer interface for wire protocol version 2 peers."""
323
324
324 apidescriptor = interfaceutil.Attribute(
325 apidescriptor = interfaceutil.Attribute(
325 """Data structure holding description of server API.""")
326 """Data structure holding description of server API.""")
326
327
327 @interfaceutil.implementer(ipeerbase)
328 @interfaceutil.implementer(ipeerbase)
328 class peer(object):
329 class peer(object):
329 """Base class for peer repositories."""
330 """Base class for peer repositories."""
330
331
331 def capable(self, name):
332 def capable(self, name):
332 caps = self.capabilities()
333 caps = self.capabilities()
333 if name in caps:
334 if name in caps:
334 return True
335 return True
335
336
336 name = '%s=' % name
337 name = '%s=' % name
337 for cap in caps:
338 for cap in caps:
338 if cap.startswith(name):
339 if cap.startswith(name):
339 return cap[len(name):]
340 return cap[len(name):]
340
341
341 return False
342 return False
342
343
343 def requirecap(self, name, purpose):
344 def requirecap(self, name, purpose):
344 if self.capable(name):
345 if self.capable(name):
345 return
346 return
346
347
347 raise error.CapabilityError(
348 raise error.CapabilityError(
348 _('cannot %s; remote repository does not support the %r '
349 _('cannot %s; remote repository does not support the %r '
349 'capability') % (purpose, name))
350 'capability') % (purpose, name))
350
351
351 class iverifyproblem(interfaceutil.Interface):
352 class iverifyproblem(interfaceutil.Interface):
352 """Represents a problem with the integrity of the repository.
353 """Represents a problem with the integrity of the repository.
353
354
354 Instances of this interface are emitted to describe an integrity issue
355 Instances of this interface are emitted to describe an integrity issue
355 with a repository (e.g. corrupt storage, missing data, etc).
356 with a repository (e.g. corrupt storage, missing data, etc).
356
357
357 Instances are essentially messages associated with severity.
358 Instances are essentially messages associated with severity.
358 """
359 """
359 warning = interfaceutil.Attribute(
360 warning = interfaceutil.Attribute(
360 """Message indicating a non-fatal problem.""")
361 """Message indicating a non-fatal problem.""")
361
362
362 error = interfaceutil.Attribute(
363 error = interfaceutil.Attribute(
363 """Message indicating a fatal problem.""")
364 """Message indicating a fatal problem.""")
364
365
365 node = interfaceutil.Attribute(
366 node = interfaceutil.Attribute(
366 """Revision encountering the problem.
367 """Revision encountering the problem.
367
368
368 ``None`` means the problem doesn't apply to a single revision.
369 ``None`` means the problem doesn't apply to a single revision.
369 """)
370 """)
370
371
371 class irevisiondelta(interfaceutil.Interface):
372 class irevisiondelta(interfaceutil.Interface):
372 """Represents a delta between one revision and another.
373 """Represents a delta between one revision and another.
373
374
374 Instances convey enough information to allow a revision to be exchanged
375 Instances convey enough information to allow a revision to be exchanged
375 with another repository.
376 with another repository.
376
377
377 Instances represent the fulltext revision data or a delta against
378 Instances represent the fulltext revision data or a delta against
378 another revision. Therefore the ``revision`` and ``delta`` attributes
379 another revision. Therefore the ``revision`` and ``delta`` attributes
379 are mutually exclusive.
380 are mutually exclusive.
380
381
381 Typically used for changegroup generation.
382 Typically used for changegroup generation.
382 """
383 """
383
384
384 node = interfaceutil.Attribute(
385 node = interfaceutil.Attribute(
385 """20 byte node of this revision.""")
386 """20 byte node of this revision.""")
386
387
387 p1node = interfaceutil.Attribute(
388 p1node = interfaceutil.Attribute(
388 """20 byte node of 1st parent of this revision.""")
389 """20 byte node of 1st parent of this revision.""")
389
390
390 p2node = interfaceutil.Attribute(
391 p2node = interfaceutil.Attribute(
391 """20 byte node of 2nd parent of this revision.""")
392 """20 byte node of 2nd parent of this revision.""")
392
393
393 linknode = interfaceutil.Attribute(
394 linknode = interfaceutil.Attribute(
394 """20 byte node of the changelog revision this node is linked to.""")
395 """20 byte node of the changelog revision this node is linked to.""")
395
396
396 flags = interfaceutil.Attribute(
397 flags = interfaceutil.Attribute(
397 """2 bytes of integer flags that apply to this revision.
398 """2 bytes of integer flags that apply to this revision.
398
399
399 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
400 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
400 """)
401 """)
401
402
402 basenode = interfaceutil.Attribute(
403 basenode = interfaceutil.Attribute(
403 """20 byte node of the revision this data is a delta against.
404 """20 byte node of the revision this data is a delta against.
404
405
405 ``nullid`` indicates that the revision is a full revision and not
406 ``nullid`` indicates that the revision is a full revision and not
406 a delta.
407 a delta.
407 """)
408 """)
408
409
409 baserevisionsize = interfaceutil.Attribute(
410 baserevisionsize = interfaceutil.Attribute(
410 """Size of base revision this delta is against.
411 """Size of base revision this delta is against.
411
412
412 May be ``None`` if ``basenode`` is ``nullid``.
413 May be ``None`` if ``basenode`` is ``nullid``.
413 """)
414 """)
414
415
415 revision = interfaceutil.Attribute(
416 revision = interfaceutil.Attribute(
416 """Raw fulltext of revision data for this node.""")
417 """Raw fulltext of revision data for this node.""")
417
418
418 delta = interfaceutil.Attribute(
419 delta = interfaceutil.Attribute(
419 """Delta between ``basenode`` and ``node``.
420 """Delta between ``basenode`` and ``node``.
420
421
421 Stored in the bdiff delta format.
422 Stored in the bdiff delta format.
422 """)
423 """)
423
424
424 class ifilerevisionssequence(interfaceutil.Interface):
425 class ifilerevisionssequence(interfaceutil.Interface):
425 """Contains index data for all revisions of a file.
426 """Contains index data for all revisions of a file.
426
427
427 Types implementing this behave like lists of tuples. The index
428 Types implementing this behave like lists of tuples. The index
428 in the list corresponds to the revision number. The values contain
429 in the list corresponds to the revision number. The values contain
429 index metadata.
430 index metadata.
430
431
431 The *null* revision (revision number -1) is always the last item
432 The *null* revision (revision number -1) is always the last item
432 in the index.
433 in the index.
433 """
434 """
434
435
435 def __len__():
436 def __len__():
436 """The total number of revisions."""
437 """The total number of revisions."""
437
438
438 def __getitem__(rev):
439 def __getitem__(rev):
439 """Returns the object having a specific revision number.
440 """Returns the object having a specific revision number.
440
441
441 Returns an 8-tuple with the following fields:
442 Returns an 8-tuple with the following fields:
442
443
443 offset+flags
444 offset+flags
444 Contains the offset and flags for the revision. 64-bit unsigned
445 Contains the offset and flags for the revision. 64-bit unsigned
445 integer where first 6 bytes are the offset and the next 2 bytes
446 integer where first 6 bytes are the offset and the next 2 bytes
446 are flags. The offset can be 0 if it is not used by the store.
447 are flags. The offset can be 0 if it is not used by the store.
447 compressed size
448 compressed size
448 Size of the revision data in the store. It can be 0 if it isn't
449 Size of the revision data in the store. It can be 0 if it isn't
449 needed by the store.
450 needed by the store.
450 uncompressed size
451 uncompressed size
451 Fulltext size. It can be 0 if it isn't needed by the store.
452 Fulltext size. It can be 0 if it isn't needed by the store.
452 base revision
453 base revision
453 Revision number of revision the delta for storage is encoded
454 Revision number of revision the delta for storage is encoded
454 against. -1 indicates not encoded against a base revision.
455 against. -1 indicates not encoded against a base revision.
455 link revision
456 link revision
456 Revision number of changelog revision this entry is related to.
457 Revision number of changelog revision this entry is related to.
457 p1 revision
458 p1 revision
458 Revision number of 1st parent. -1 if no 1st parent.
459 Revision number of 1st parent. -1 if no 1st parent.
459 p2 revision
460 p2 revision
460 Revision number of 2nd parent. -1 if no 1st parent.
461 Revision number of 2nd parent. -1 if no 1st parent.
461 node
462 node
462 Binary node value for this revision number.
463 Binary node value for this revision number.
463
464
464 Negative values should index off the end of the sequence. ``-1``
465 Negative values should index off the end of the sequence. ``-1``
465 should return the null revision. ``-2`` should return the most
466 should return the null revision. ``-2`` should return the most
466 recent revision.
467 recent revision.
467 """
468 """
468
469
469 def __contains__(rev):
470 def __contains__(rev):
470 """Whether a revision number exists."""
471 """Whether a revision number exists."""
471
472
472 def insert(self, i, entry):
473 def insert(self, i, entry):
473 """Add an item to the index at specific revision."""
474 """Add an item to the index at specific revision."""
474
475
475 class ifileindex(interfaceutil.Interface):
476 class ifileindex(interfaceutil.Interface):
476 """Storage interface for index data of a single file.
477 """Storage interface for index data of a single file.
477
478
478 File storage data is divided into index metadata and data storage.
479 File storage data is divided into index metadata and data storage.
479 This interface defines the index portion of the interface.
480 This interface defines the index portion of the interface.
480
481
481 The index logically consists of:
482 The index logically consists of:
482
483
483 * A mapping between revision numbers and nodes.
484 * A mapping between revision numbers and nodes.
484 * DAG data (storing and querying the relationship between nodes).
485 * DAG data (storing and querying the relationship between nodes).
485 * Metadata to facilitate storage.
486 * Metadata to facilitate storage.
486 """
487 """
487 def __len__():
488 def __len__():
488 """Obtain the number of revisions stored for this file."""
489 """Obtain the number of revisions stored for this file."""
489
490
490 def __iter__():
491 def __iter__():
491 """Iterate over revision numbers for this file."""
492 """Iterate over revision numbers for this file."""
492
493
493 def hasnode(node):
494 def hasnode(node):
494 """Returns a bool indicating if a node is known to this store.
495 """Returns a bool indicating if a node is known to this store.
495
496
496 Implementations must only return True for full, binary node values:
497 Implementations must only return True for full, binary node values:
497 hex nodes, revision numbers, and partial node matches must be
498 hex nodes, revision numbers, and partial node matches must be
498 rejected.
499 rejected.
499
500
500 The null node is never present.
501 The null node is never present.
501 """
502 """
502
503
503 def revs(start=0, stop=None):
504 def revs(start=0, stop=None):
504 """Iterate over revision numbers for this file, with control."""
505 """Iterate over revision numbers for this file, with control."""
505
506
506 def parents(node):
507 def parents(node):
507 """Returns a 2-tuple of parent nodes for a revision.
508 """Returns a 2-tuple of parent nodes for a revision.
508
509
509 Values will be ``nullid`` if the parent is empty.
510 Values will be ``nullid`` if the parent is empty.
510 """
511 """
511
512
512 def parentrevs(rev):
513 def parentrevs(rev):
513 """Like parents() but operates on revision numbers."""
514 """Like parents() but operates on revision numbers."""
514
515
515 def rev(node):
516 def rev(node):
516 """Obtain the revision number given a node.
517 """Obtain the revision number given a node.
517
518
518 Raises ``error.LookupError`` if the node is not known.
519 Raises ``error.LookupError`` if the node is not known.
519 """
520 """
520
521
521 def node(rev):
522 def node(rev):
522 """Obtain the node value given a revision number.
523 """Obtain the node value given a revision number.
523
524
524 Raises ``IndexError`` if the node is not known.
525 Raises ``IndexError`` if the node is not known.
525 """
526 """
526
527
527 def lookup(node):
528 def lookup(node):
528 """Attempt to resolve a value to a node.
529 """Attempt to resolve a value to a node.
529
530
530 Value can be a binary node, hex node, revision number, or a string
531 Value can be a binary node, hex node, revision number, or a string
531 that can be converted to an integer.
532 that can be converted to an integer.
532
533
533 Raises ``error.LookupError`` if a node could not be resolved.
534 Raises ``error.LookupError`` if a node could not be resolved.
534 """
535 """
535
536
536 def linkrev(rev):
537 def linkrev(rev):
537 """Obtain the changeset revision number a revision is linked to."""
538 """Obtain the changeset revision number a revision is linked to."""
538
539
539 def iscensored(rev):
540 def iscensored(rev):
540 """Return whether a revision's content has been censored."""
541 """Return whether a revision's content has been censored."""
541
542
542 def commonancestorsheads(node1, node2):
543 def commonancestorsheads(node1, node2):
543 """Obtain an iterable of nodes containing heads of common ancestors.
544 """Obtain an iterable of nodes containing heads of common ancestors.
544
545
545 See ``ancestor.commonancestorsheads()``.
546 See ``ancestor.commonancestorsheads()``.
546 """
547 """
547
548
548 def descendants(revs):
549 def descendants(revs):
549 """Obtain descendant revision numbers for a set of revision numbers.
550 """Obtain descendant revision numbers for a set of revision numbers.
550
551
551 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
552 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
552 """
553 """
553
554
554 def heads(start=None, stop=None):
555 def heads(start=None, stop=None):
555 """Obtain a list of nodes that are DAG heads, with control.
556 """Obtain a list of nodes that are DAG heads, with control.
556
557
557 The set of revisions examined can be limited by specifying
558 The set of revisions examined can be limited by specifying
558 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
559 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
559 iterable of nodes. DAG traversal starts at earlier revision
560 iterable of nodes. DAG traversal starts at earlier revision
560 ``start`` and iterates forward until any node in ``stop`` is
561 ``start`` and iterates forward until any node in ``stop`` is
561 encountered.
562 encountered.
562 """
563 """
563
564
564 def children(node):
565 def children(node):
565 """Obtain nodes that are children of a node.
566 """Obtain nodes that are children of a node.
566
567
567 Returns a list of nodes.
568 Returns a list of nodes.
568 """
569 """
569
570
570 class ifiledata(interfaceutil.Interface):
571 class ifiledata(interfaceutil.Interface):
571 """Storage interface for data storage of a specific file.
572 """Storage interface for data storage of a specific file.
572
573
573 This complements ``ifileindex`` and provides an interface for accessing
574 This complements ``ifileindex`` and provides an interface for accessing
574 data for a tracked file.
575 data for a tracked file.
575 """
576 """
576 def size(rev):
577 def size(rev):
577 """Obtain the fulltext size of file data.
578 """Obtain the fulltext size of file data.
578
579
579 Any metadata is excluded from size measurements.
580 Any metadata is excluded from size measurements.
580 """
581 """
581
582
582 def revision(node, raw=False):
583 def revision(node, raw=False):
583 """"Obtain fulltext data for a node.
584 """"Obtain fulltext data for a node.
584
585
585 By default, any storage transformations are applied before the data
586 By default, any storage transformations are applied before the data
586 is returned. If ``raw`` is True, non-raw storage transformations
587 is returned. If ``raw`` is True, non-raw storage transformations
587 are not applied.
588 are not applied.
588
589
589 The fulltext data may contain a header containing metadata. Most
590 The fulltext data may contain a header containing metadata. Most
590 consumers should use ``read()`` to obtain the actual file data.
591 consumers should use ``read()`` to obtain the actual file data.
591 """
592 """
592
593
593 def read(node):
594 def read(node):
594 """Resolve file fulltext data.
595 """Resolve file fulltext data.
595
596
596 This is similar to ``revision()`` except any metadata in the data
597 This is similar to ``revision()`` except any metadata in the data
597 headers is stripped.
598 headers is stripped.
598 """
599 """
599
600
600 def renamed(node):
601 def renamed(node):
601 """Obtain copy metadata for a node.
602 """Obtain copy metadata for a node.
602
603
603 Returns ``False`` if no copy metadata is stored or a 2-tuple of
604 Returns ``False`` if no copy metadata is stored or a 2-tuple of
604 (path, node) from which this revision was copied.
605 (path, node) from which this revision was copied.
605 """
606 """
606
607
607 def cmp(node, fulltext):
608 def cmp(node, fulltext):
608 """Compare fulltext to another revision.
609 """Compare fulltext to another revision.
609
610
610 Returns True if the fulltext is different from what is stored.
611 Returns True if the fulltext is different from what is stored.
611
612
612 This takes copy metadata into account.
613 This takes copy metadata into account.
613
614
614 TODO better document the copy metadata and censoring logic.
615 TODO better document the copy metadata and censoring logic.
615 """
616 """
616
617
617 def emitrevisions(nodes,
618 def emitrevisions(nodes,
618 nodesorder=None,
619 nodesorder=None,
619 revisiondata=False,
620 revisiondata=False,
620 assumehaveparentrevisions=False,
621 assumehaveparentrevisions=False,
621 deltamode=CG_DELTAMODE_STD):
622 deltamode=CG_DELTAMODE_STD):
622 """Produce ``irevisiondelta`` for revisions.
623 """Produce ``irevisiondelta`` for revisions.
623
624
624 Given an iterable of nodes, emits objects conforming to the
625 Given an iterable of nodes, emits objects conforming to the
625 ``irevisiondelta`` interface that describe revisions in storage.
626 ``irevisiondelta`` interface that describe revisions in storage.
626
627
627 This method is a generator.
628 This method is a generator.
628
629
629 The input nodes may be unordered. Implementations must ensure that a
630 The input nodes may be unordered. Implementations must ensure that a
630 node's parents are emitted before the node itself. Transitively, this
631 node's parents are emitted before the node itself. Transitively, this
631 means that a node may only be emitted once all its ancestors in
632 means that a node may only be emitted once all its ancestors in
632 ``nodes`` have also been emitted.
633 ``nodes`` have also been emitted.
633
634
634 By default, emits "index" data (the ``node``, ``p1node``, and
635 By default, emits "index" data (the ``node``, ``p1node``, and
635 ``p2node`` attributes). If ``revisiondata`` is set, revision data
636 ``p2node`` attributes). If ``revisiondata`` is set, revision data
636 will also be present on the emitted objects.
637 will also be present on the emitted objects.
637
638
638 With default argument values, implementations can choose to emit
639 With default argument values, implementations can choose to emit
639 either fulltext revision data or a delta. When emitting deltas,
640 either fulltext revision data or a delta. When emitting deltas,
640 implementations must consider whether the delta's base revision
641 implementations must consider whether the delta's base revision
641 fulltext is available to the receiver.
642 fulltext is available to the receiver.
642
643
643 The base revision fulltext is guaranteed to be available if any of
644 The base revision fulltext is guaranteed to be available if any of
644 the following are met:
645 the following are met:
645
646
646 * Its fulltext revision was emitted by this method call.
647 * Its fulltext revision was emitted by this method call.
647 * A delta for that revision was emitted by this method call.
648 * A delta for that revision was emitted by this method call.
648 * ``assumehaveparentrevisions`` is True and the base revision is a
649 * ``assumehaveparentrevisions`` is True and the base revision is a
649 parent of the node.
650 parent of the node.
650
651
651 ``nodesorder`` can be used to control the order that revisions are
652 ``nodesorder`` can be used to control the order that revisions are
652 emitted. By default, revisions can be reordered as long as they are
653 emitted. By default, revisions can be reordered as long as they are
653 in DAG topological order (see above). If the value is ``nodes``,
654 in DAG topological order (see above). If the value is ``nodes``,
654 the iteration order from ``nodes`` should be used. If the value is
655 the iteration order from ``nodes`` should be used. If the value is
655 ``storage``, then the native order from the backing storage layer
656 ``storage``, then the native order from the backing storage layer
656 is used. (Not all storage layers will have strong ordering and behavior
657 is used. (Not all storage layers will have strong ordering and behavior
657 of this mode is storage-dependent.) ``nodes`` ordering can force
658 of this mode is storage-dependent.) ``nodes`` ordering can force
658 revisions to be emitted before their ancestors, so consumers should
659 revisions to be emitted before their ancestors, so consumers should
659 use it with care.
660 use it with care.
660
661
661 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
662 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
662 be set and it is the caller's responsibility to resolve it, if needed.
663 be set and it is the caller's responsibility to resolve it, if needed.
663
664
664 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
665 If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
665 all revision data should be emitted as deltas against the revision
666 all revision data should be emitted as deltas against the revision
666 emitted just prior. The initial revision should be a delta against its
667 emitted just prior. The initial revision should be a delta against its
667 1st parent.
668 1st parent.
668 """
669 """
669
670
670 class ifilemutation(interfaceutil.Interface):
671 class ifilemutation(interfaceutil.Interface):
671 """Storage interface for mutation events of a tracked file."""
672 """Storage interface for mutation events of a tracked file."""
672
673
673 def add(filedata, meta, transaction, linkrev, p1, p2):
674 def add(filedata, meta, transaction, linkrev, p1, p2):
674 """Add a new revision to the store.
675 """Add a new revision to the store.
675
676
676 Takes file data, dictionary of metadata, a transaction, linkrev,
677 Takes file data, dictionary of metadata, a transaction, linkrev,
677 and parent nodes.
678 and parent nodes.
678
679
679 Returns the node that was added.
680 Returns the node that was added.
680
681
681 May no-op if a revision matching the supplied data is already stored.
682 May no-op if a revision matching the supplied data is already stored.
682 """
683 """
683
684
684 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
685 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
685 flags=0, cachedelta=None):
686 flags=0, cachedelta=None):
686 """Add a new revision to the store.
687 """Add a new revision to the store.
687
688
688 This is similar to ``add()`` except it operates at a lower level.
689 This is similar to ``add()`` except it operates at a lower level.
689
690
690 The data passed in already contains a metadata header, if any.
691 The data passed in already contains a metadata header, if any.
691
692
692 ``node`` and ``flags`` can be used to define the expected node and
693 ``node`` and ``flags`` can be used to define the expected node and
693 the flags to use with storage. ``flags`` is a bitwise value composed
694 the flags to use with storage. ``flags`` is a bitwise value composed
694 of the various ``REVISION_FLAG_*`` constants.
695 of the various ``REVISION_FLAG_*`` constants.
695
696
696 ``add()`` is usually called when adding files from e.g. the working
697 ``add()`` is usually called when adding files from e.g. the working
697 directory. ``addrevision()`` is often called by ``add()`` and for
698 directory. ``addrevision()`` is often called by ``add()`` and for
698 scenarios where revision data has already been computed, such as when
699 scenarios where revision data has already been computed, such as when
699 applying raw data from a peer repo.
700 applying raw data from a peer repo.
700 """
701 """
701
702
702 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
703 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
703 maybemissingparents=False):
704 maybemissingparents=False):
704 """Process a series of deltas for storage.
705 """Process a series of deltas for storage.
705
706
706 ``deltas`` is an iterable of 7-tuples of
707 ``deltas`` is an iterable of 7-tuples of
707 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
708 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
708 to add.
709 to add.
709
710
710 The ``delta`` field contains ``mpatch`` data to apply to a base
711 The ``delta`` field contains ``mpatch`` data to apply to a base
711 revision, identified by ``deltabase``. The base node can be
712 revision, identified by ``deltabase``. The base node can be
712 ``nullid``, in which case the header from the delta can be ignored
713 ``nullid``, in which case the header from the delta can be ignored
713 and the delta used as the fulltext.
714 and the delta used as the fulltext.
714
715
715 ``addrevisioncb`` should be called for each node as it is committed.
716 ``addrevisioncb`` should be called for each node as it is committed.
716
717
717 ``maybemissingparents`` is a bool indicating whether the incoming
718 ``maybemissingparents`` is a bool indicating whether the incoming
718 data may reference parents/ancestor revisions that aren't present.
719 data may reference parents/ancestor revisions that aren't present.
719 This flag is set when receiving data into a "shallow" store that
720 This flag is set when receiving data into a "shallow" store that
720 doesn't hold all history.
721 doesn't hold all history.
721
722
722 Returns a list of nodes that were processed. A node will be in the list
723 Returns a list of nodes that were processed. A node will be in the list
723 even if it existed in the store previously.
724 even if it existed in the store previously.
724 """
725 """
725
726
726 def censorrevision(tr, node, tombstone=b''):
727 def censorrevision(tr, node, tombstone=b''):
727 """Remove the content of a single revision.
728 """Remove the content of a single revision.
728
729
729 The specified ``node`` will have its content purged from storage.
730 The specified ``node`` will have its content purged from storage.
730 Future attempts to access the revision data for this node will
731 Future attempts to access the revision data for this node will
731 result in failure.
732 result in failure.
732
733
733 A ``tombstone`` message can optionally be stored. This message may be
734 A ``tombstone`` message can optionally be stored. This message may be
734 displayed to users when they attempt to access the missing revision
735 displayed to users when they attempt to access the missing revision
735 data.
736 data.
736
737
737 Storage backends may have stored deltas against the previous content
738 Storage backends may have stored deltas against the previous content
738 in this revision. As part of censoring a revision, these storage
739 in this revision. As part of censoring a revision, these storage
739 backends are expected to rewrite any internally stored deltas such
740 backends are expected to rewrite any internally stored deltas such
740 that they no longer reference the deleted content.
741 that they no longer reference the deleted content.
741 """
742 """
742
743
743 def getstrippoint(minlink):
744 def getstrippoint(minlink):
744 """Find the minimum revision that must be stripped to strip a linkrev.
745 """Find the minimum revision that must be stripped to strip a linkrev.
745
746
746 Returns a 2-tuple containing the minimum revision number and a set
747 Returns a 2-tuple containing the minimum revision number and a set
747 of all revisions numbers that would be broken by this strip.
748 of all revisions numbers that would be broken by this strip.
748
749
749 TODO this is highly revlog centric and should be abstracted into
750 TODO this is highly revlog centric and should be abstracted into
750 a higher-level deletion API. ``repair.strip()`` relies on this.
751 a higher-level deletion API. ``repair.strip()`` relies on this.
751 """
752 """
752
753
753 def strip(minlink, transaction):
754 def strip(minlink, transaction):
754 """Remove storage of items starting at a linkrev.
755 """Remove storage of items starting at a linkrev.
755
756
756 This uses ``getstrippoint()`` to determine the first node to remove.
757 This uses ``getstrippoint()`` to determine the first node to remove.
757 Then it effectively truncates storage for all revisions after that.
758 Then it effectively truncates storage for all revisions after that.
758
759
759 TODO this is highly revlog centric and should be abstracted into a
760 TODO this is highly revlog centric and should be abstracted into a
760 higher-level deletion API.
761 higher-level deletion API.
761 """
762 """
762
763
763 class ifilestorage(ifileindex, ifiledata, ifilemutation):
764 class ifilestorage(ifileindex, ifiledata, ifilemutation):
764 """Complete storage interface for a single tracked file."""
765 """Complete storage interface for a single tracked file."""
765
766
766 def files():
767 def files():
767 """Obtain paths that are backing storage for this file.
768 """Obtain paths that are backing storage for this file.
768
769
769 TODO this is used heavily by verify code and there should probably
770 TODO this is used heavily by verify code and there should probably
770 be a better API for that.
771 be a better API for that.
771 """
772 """
772
773
773 def storageinfo(exclusivefiles=False, sharedfiles=False,
774 def storageinfo(exclusivefiles=False, sharedfiles=False,
774 revisionscount=False, trackedsize=False,
775 revisionscount=False, trackedsize=False,
775 storedsize=False):
776 storedsize=False):
776 """Obtain information about storage for this file's data.
777 """Obtain information about storage for this file's data.
777
778
778 Returns a dict describing storage for this tracked path. The keys
779 Returns a dict describing storage for this tracked path. The keys
779 in the dict map to arguments of the same. The arguments are bools
780 in the dict map to arguments of the same. The arguments are bools
780 indicating whether to calculate and obtain that data.
781 indicating whether to calculate and obtain that data.
781
782
782 exclusivefiles
783 exclusivefiles
783 Iterable of (vfs, path) describing files that are exclusively
784 Iterable of (vfs, path) describing files that are exclusively
784 used to back storage for this tracked path.
785 used to back storage for this tracked path.
785
786
786 sharedfiles
787 sharedfiles
787 Iterable of (vfs, path) describing files that are used to back
788 Iterable of (vfs, path) describing files that are used to back
788 storage for this tracked path. Those files may also provide storage
789 storage for this tracked path. Those files may also provide storage
789 for other stored entities.
790 for other stored entities.
790
791
791 revisionscount
792 revisionscount
792 Number of revisions available for retrieval.
793 Number of revisions available for retrieval.
793
794
794 trackedsize
795 trackedsize
795 Total size in bytes of all tracked revisions. This is a sum of the
796 Total size in bytes of all tracked revisions. This is a sum of the
796 length of the fulltext of all revisions.
797 length of the fulltext of all revisions.
797
798
798 storedsize
799 storedsize
799 Total size in bytes used to store data for all tracked revisions.
800 Total size in bytes used to store data for all tracked revisions.
800 This is commonly less than ``trackedsize`` due to internal usage
801 This is commonly less than ``trackedsize`` due to internal usage
801 of deltas rather than fulltext revisions.
802 of deltas rather than fulltext revisions.
802
803
803 Not all storage backends may support all queries are have a reasonable
804 Not all storage backends may support all queries are have a reasonable
804 value to use. In that case, the value should be set to ``None`` and
805 value to use. In that case, the value should be set to ``None`` and
805 callers are expected to handle this special value.
806 callers are expected to handle this special value.
806 """
807 """
807
808
808 def verifyintegrity(state):
809 def verifyintegrity(state):
809 """Verifies the integrity of file storage.
810 """Verifies the integrity of file storage.
810
811
811 ``state`` is a dict holding state of the verifier process. It can be
812 ``state`` is a dict holding state of the verifier process. It can be
812 used to communicate data between invocations of multiple storage
813 used to communicate data between invocations of multiple storage
813 primitives.
814 primitives.
814
815
815 If individual revisions cannot have their revision content resolved,
816 If individual revisions cannot have their revision content resolved,
816 the method is expected to set the ``skipread`` key to a set of nodes
817 the method is expected to set the ``skipread`` key to a set of nodes
817 that encountered problems.
818 that encountered problems.
818
819
819 The method yields objects conforming to the ``iverifyproblem``
820 The method yields objects conforming to the ``iverifyproblem``
820 interface.
821 interface.
821 """
822 """
822
823
823 class idirs(interfaceutil.Interface):
824 class idirs(interfaceutil.Interface):
824 """Interface representing a collection of directories from paths.
825 """Interface representing a collection of directories from paths.
825
826
826 This interface is essentially a derived data structure representing
827 This interface is essentially a derived data structure representing
827 directories from a collection of paths.
828 directories from a collection of paths.
828 """
829 """
829
830
830 def addpath(path):
831 def addpath(path):
831 """Add a path to the collection.
832 """Add a path to the collection.
832
833
833 All directories in the path will be added to the collection.
834 All directories in the path will be added to the collection.
834 """
835 """
835
836
836 def delpath(path):
837 def delpath(path):
837 """Remove a path from the collection.
838 """Remove a path from the collection.
838
839
839 If the removal was the last path in a particular directory, the
840 If the removal was the last path in a particular directory, the
840 directory is removed from the collection.
841 directory is removed from the collection.
841 """
842 """
842
843
843 def __iter__():
844 def __iter__():
844 """Iterate over the directories in this collection of paths."""
845 """Iterate over the directories in this collection of paths."""
845
846
846 def __contains__(path):
847 def __contains__(path):
847 """Whether a specific directory is in this collection."""
848 """Whether a specific directory is in this collection."""
848
849
849 class imanifestdict(interfaceutil.Interface):
850 class imanifestdict(interfaceutil.Interface):
850 """Interface representing a manifest data structure.
851 """Interface representing a manifest data structure.
851
852
852 A manifest is effectively a dict mapping paths to entries. Each entry
853 A manifest is effectively a dict mapping paths to entries. Each entry
853 consists of a binary node and extra flags affecting that entry.
854 consists of a binary node and extra flags affecting that entry.
854 """
855 """
855
856
856 def __getitem__(path):
857 def __getitem__(path):
857 """Returns the binary node value for a path in the manifest.
858 """Returns the binary node value for a path in the manifest.
858
859
859 Raises ``KeyError`` if the path does not exist in the manifest.
860 Raises ``KeyError`` if the path does not exist in the manifest.
860
861
861 Equivalent to ``self.find(path)[0]``.
862 Equivalent to ``self.find(path)[0]``.
862 """
863 """
863
864
864 def find(path):
865 def find(path):
865 """Returns the entry for a path in the manifest.
866 """Returns the entry for a path in the manifest.
866
867
867 Returns a 2-tuple of (node, flags).
868 Returns a 2-tuple of (node, flags).
868
869
869 Raises ``KeyError`` if the path does not exist in the manifest.
870 Raises ``KeyError`` if the path does not exist in the manifest.
870 """
871 """
871
872
872 def __len__():
873 def __len__():
873 """Return the number of entries in the manifest."""
874 """Return the number of entries in the manifest."""
874
875
875 def __nonzero__():
876 def __nonzero__():
876 """Returns True if the manifest has entries, False otherwise."""
877 """Returns True if the manifest has entries, False otherwise."""
877
878
878 __bool__ = __nonzero__
879 __bool__ = __nonzero__
879
880
880 def __setitem__(path, node):
881 def __setitem__(path, node):
881 """Define the node value for a path in the manifest.
882 """Define the node value for a path in the manifest.
882
883
883 If the path is already in the manifest, its flags will be copied to
884 If the path is already in the manifest, its flags will be copied to
884 the new entry.
885 the new entry.
885 """
886 """
886
887
887 def __contains__(path):
888 def __contains__(path):
888 """Whether a path exists in the manifest."""
889 """Whether a path exists in the manifest."""
889
890
890 def __delitem__(path):
891 def __delitem__(path):
891 """Remove a path from the manifest.
892 """Remove a path from the manifest.
892
893
893 Raises ``KeyError`` if the path is not in the manifest.
894 Raises ``KeyError`` if the path is not in the manifest.
894 """
895 """
895
896
896 def __iter__():
897 def __iter__():
897 """Iterate over paths in the manifest."""
898 """Iterate over paths in the manifest."""
898
899
899 def iterkeys():
900 def iterkeys():
900 """Iterate over paths in the manifest."""
901 """Iterate over paths in the manifest."""
901
902
902 def keys():
903 def keys():
903 """Obtain a list of paths in the manifest."""
904 """Obtain a list of paths in the manifest."""
904
905
905 def filesnotin(other, match=None):
906 def filesnotin(other, match=None):
906 """Obtain the set of paths in this manifest but not in another.
907 """Obtain the set of paths in this manifest but not in another.
907
908
908 ``match`` is an optional matcher function to be applied to both
909 ``match`` is an optional matcher function to be applied to both
909 manifests.
910 manifests.
910
911
911 Returns a set of paths.
912 Returns a set of paths.
912 """
913 """
913
914
914 def dirs():
915 def dirs():
915 """Returns an object implementing the ``idirs`` interface."""
916 """Returns an object implementing the ``idirs`` interface."""
916
917
917 def hasdir(dir):
918 def hasdir(dir):
918 """Returns a bool indicating if a directory is in this manifest."""
919 """Returns a bool indicating if a directory is in this manifest."""
919
920
920 def matches(match):
921 def matches(match):
921 """Generate a new manifest filtered through a matcher.
922 """Generate a new manifest filtered through a matcher.
922
923
923 Returns an object conforming to the ``imanifestdict`` interface.
924 Returns an object conforming to the ``imanifestdict`` interface.
924 """
925 """
925
926
926 def walk(match):
927 def walk(match):
927 """Generator of paths in manifest satisfying a matcher.
928 """Generator of paths in manifest satisfying a matcher.
928
929
929 This is equivalent to ``self.matches(match).iterkeys()`` except a new
930 This is equivalent to ``self.matches(match).iterkeys()`` except a new
930 manifest object is not created.
931 manifest object is not created.
931
932
932 If the matcher has explicit files listed and they don't exist in
933 If the matcher has explicit files listed and they don't exist in
933 the manifest, ``match.bad()`` is called for each missing file.
934 the manifest, ``match.bad()`` is called for each missing file.
934 """
935 """
935
936
936 def diff(other, match=None, clean=False):
937 def diff(other, match=None, clean=False):
937 """Find differences between this manifest and another.
938 """Find differences between this manifest and another.
938
939
939 This manifest is compared to ``other``.
940 This manifest is compared to ``other``.
940
941
941 If ``match`` is provided, the two manifests are filtered against this
942 If ``match`` is provided, the two manifests are filtered against this
942 matcher and only entries satisfying the matcher are compared.
943 matcher and only entries satisfying the matcher are compared.
943
944
944 If ``clean`` is True, unchanged files are included in the returned
945 If ``clean`` is True, unchanged files are included in the returned
945 object.
946 object.
946
947
947 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
948 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
948 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
949 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
949 represents the node and flags for this manifest and ``(node2, flag2)``
950 represents the node and flags for this manifest and ``(node2, flag2)``
950 are the same for the other manifest.
951 are the same for the other manifest.
951 """
952 """
952
953
953 def setflag(path, flag):
954 def setflag(path, flag):
954 """Set the flag value for a given path.
955 """Set the flag value for a given path.
955
956
956 Raises ``KeyError`` if the path is not already in the manifest.
957 Raises ``KeyError`` if the path is not already in the manifest.
957 """
958 """
958
959
959 def get(path, default=None):
960 def get(path, default=None):
960 """Obtain the node value for a path or a default value if missing."""
961 """Obtain the node value for a path or a default value if missing."""
961
962
962 def flags(path, default=''):
963 def flags(path, default=''):
963 """Return the flags value for a path or a default value if missing."""
964 """Return the flags value for a path or a default value if missing."""
964
965
965 def copy():
966 def copy():
966 """Return a copy of this manifest."""
967 """Return a copy of this manifest."""
967
968
968 def items():
969 def items():
969 """Returns an iterable of (path, node) for items in this manifest."""
970 """Returns an iterable of (path, node) for items in this manifest."""
970
971
971 def iteritems():
972 def iteritems():
972 """Identical to items()."""
973 """Identical to items()."""
973
974
974 def iterentries():
975 def iterentries():
975 """Returns an iterable of (path, node, flags) for this manifest.
976 """Returns an iterable of (path, node, flags) for this manifest.
976
977
977 Similar to ``iteritems()`` except items are a 3-tuple and include
978 Similar to ``iteritems()`` except items are a 3-tuple and include
978 flags.
979 flags.
979 """
980 """
980
981
981 def text():
982 def text():
982 """Obtain the raw data representation for this manifest.
983 """Obtain the raw data representation for this manifest.
983
984
984 Result is used to create a manifest revision.
985 Result is used to create a manifest revision.
985 """
986 """
986
987
987 def fastdelta(base, changes):
988 def fastdelta(base, changes):
988 """Obtain a delta between this manifest and another given changes.
989 """Obtain a delta between this manifest and another given changes.
989
990
990 ``base`` in the raw data representation for another manifest.
991 ``base`` in the raw data representation for another manifest.
991
992
992 ``changes`` is an iterable of ``(path, to_delete)``.
993 ``changes`` is an iterable of ``(path, to_delete)``.
993
994
994 Returns a 2-tuple containing ``bytearray(self.text())`` and the
995 Returns a 2-tuple containing ``bytearray(self.text())`` and the
995 delta between ``base`` and this manifest.
996 delta between ``base`` and this manifest.
996 """
997 """
997
998
998 class imanifestrevisionbase(interfaceutil.Interface):
999 class imanifestrevisionbase(interfaceutil.Interface):
999 """Base interface representing a single revision of a manifest.
1000 """Base interface representing a single revision of a manifest.
1000
1001
1001 Should not be used as a primary interface: should always be inherited
1002 Should not be used as a primary interface: should always be inherited
1002 as part of a larger interface.
1003 as part of a larger interface.
1003 """
1004 """
1004
1005
1005 def new():
1006 def new():
1006 """Obtain a new manifest instance.
1007 """Obtain a new manifest instance.
1007
1008
1008 Returns an object conforming to the ``imanifestrevisionwritable``
1009 Returns an object conforming to the ``imanifestrevisionwritable``
1009 interface. The instance will be associated with the same
1010 interface. The instance will be associated with the same
1010 ``imanifestlog`` collection as this instance.
1011 ``imanifestlog`` collection as this instance.
1011 """
1012 """
1012
1013
1013 def copy():
1014 def copy():
1014 """Obtain a copy of this manifest instance.
1015 """Obtain a copy of this manifest instance.
1015
1016
1016 Returns an object conforming to the ``imanifestrevisionwritable``
1017 Returns an object conforming to the ``imanifestrevisionwritable``
1017 interface. The instance will be associated with the same
1018 interface. The instance will be associated with the same
1018 ``imanifestlog`` collection as this instance.
1019 ``imanifestlog`` collection as this instance.
1019 """
1020 """
1020
1021
1021 def read():
1022 def read():
1022 """Obtain the parsed manifest data structure.
1023 """Obtain the parsed manifest data structure.
1023
1024
1024 The returned object conforms to the ``imanifestdict`` interface.
1025 The returned object conforms to the ``imanifestdict`` interface.
1025 """
1026 """
1026
1027
1027 class imanifestrevisionstored(imanifestrevisionbase):
1028 class imanifestrevisionstored(imanifestrevisionbase):
1028 """Interface representing a manifest revision committed to storage."""
1029 """Interface representing a manifest revision committed to storage."""
1029
1030
1030 def node():
1031 def node():
1031 """The binary node for this manifest."""
1032 """The binary node for this manifest."""
1032
1033
1033 parents = interfaceutil.Attribute(
1034 parents = interfaceutil.Attribute(
1034 """List of binary nodes that are parents for this manifest revision."""
1035 """List of binary nodes that are parents for this manifest revision."""
1035 )
1036 )
1036
1037
1037 def readdelta(shallow=False):
1038 def readdelta(shallow=False):
1038 """Obtain the manifest data structure representing changes from parent.
1039 """Obtain the manifest data structure representing changes from parent.
1039
1040
1040 This manifest is compared to its 1st parent. A new manifest representing
1041 This manifest is compared to its 1st parent. A new manifest representing
1041 those differences is constructed.
1042 those differences is constructed.
1042
1043
1043 The returned object conforms to the ``imanifestdict`` interface.
1044 The returned object conforms to the ``imanifestdict`` interface.
1044 """
1045 """
1045
1046
1046 def readfast(shallow=False):
1047 def readfast(shallow=False):
1047 """Calls either ``read()`` or ``readdelta()``.
1048 """Calls either ``read()`` or ``readdelta()``.
1048
1049
1049 The faster of the two options is called.
1050 The faster of the two options is called.
1050 """
1051 """
1051
1052
1052 def find(key):
1053 def find(key):
1053 """Calls self.read().find(key)``.
1054 """Calls self.read().find(key)``.
1054
1055
1055 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1056 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1056 """
1057 """
1057
1058
1058 class imanifestrevisionwritable(imanifestrevisionbase):
1059 class imanifestrevisionwritable(imanifestrevisionbase):
1059 """Interface representing a manifest revision that can be committed."""
1060 """Interface representing a manifest revision that can be committed."""
1060
1061
1061 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1062 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1062 """Add this revision to storage.
1063 """Add this revision to storage.
1063
1064
1064 Takes a transaction object, the changeset revision number it will
1065 Takes a transaction object, the changeset revision number it will
1065 be associated with, its parent nodes, and lists of added and
1066 be associated with, its parent nodes, and lists of added and
1066 removed paths.
1067 removed paths.
1067
1068
1068 If match is provided, storage can choose not to inspect or write out
1069 If match is provided, storage can choose not to inspect or write out
1069 items that do not match. Storage is still required to be able to provide
1070 items that do not match. Storage is still required to be able to provide
1070 the full manifest in the future for any directories written (these
1071 the full manifest in the future for any directories written (these
1071 manifests should not be "narrowed on disk").
1072 manifests should not be "narrowed on disk").
1072
1073
1073 Returns the binary node of the created revision.
1074 Returns the binary node of the created revision.
1074 """
1075 """
1075
1076
1076 class imanifeststorage(interfaceutil.Interface):
1077 class imanifeststorage(interfaceutil.Interface):
1077 """Storage interface for manifest data."""
1078 """Storage interface for manifest data."""
1078
1079
1079 tree = interfaceutil.Attribute(
1080 tree = interfaceutil.Attribute(
1080 """The path to the directory this manifest tracks.
1081 """The path to the directory this manifest tracks.
1081
1082
1082 The empty bytestring represents the root manifest.
1083 The empty bytestring represents the root manifest.
1083 """)
1084 """)
1084
1085
1085 index = interfaceutil.Attribute(
1086 index = interfaceutil.Attribute(
1086 """An ``ifilerevisionssequence`` instance.""")
1087 """An ``ifilerevisionssequence`` instance.""")
1087
1088
1088 indexfile = interfaceutil.Attribute(
1089 indexfile = interfaceutil.Attribute(
1089 """Path of revlog index file.
1090 """Path of revlog index file.
1090
1091
1091 TODO this is revlog specific and should not be exposed.
1092 TODO this is revlog specific and should not be exposed.
1092 """)
1093 """)
1093
1094
1094 opener = interfaceutil.Attribute(
1095 opener = interfaceutil.Attribute(
1095 """VFS opener to use to access underlying files used for storage.
1096 """VFS opener to use to access underlying files used for storage.
1096
1097
1097 TODO this is revlog specific and should not be exposed.
1098 TODO this is revlog specific and should not be exposed.
1098 """)
1099 """)
1099
1100
1100 version = interfaceutil.Attribute(
1101 version = interfaceutil.Attribute(
1101 """Revlog version number.
1102 """Revlog version number.
1102
1103
1103 TODO this is revlog specific and should not be exposed.
1104 TODO this is revlog specific and should not be exposed.
1104 """)
1105 """)
1105
1106
1106 _generaldelta = interfaceutil.Attribute(
1107 _generaldelta = interfaceutil.Attribute(
1107 """Whether generaldelta storage is being used.
1108 """Whether generaldelta storage is being used.
1108
1109
1109 TODO this is revlog specific and should not be exposed.
1110 TODO this is revlog specific and should not be exposed.
1110 """)
1111 """)
1111
1112
1112 fulltextcache = interfaceutil.Attribute(
1113 fulltextcache = interfaceutil.Attribute(
1113 """Dict with cache of fulltexts.
1114 """Dict with cache of fulltexts.
1114
1115
1115 TODO this doesn't feel appropriate for the storage interface.
1116 TODO this doesn't feel appropriate for the storage interface.
1116 """)
1117 """)
1117
1118
1118 def __len__():
1119 def __len__():
1119 """Obtain the number of revisions stored for this manifest."""
1120 """Obtain the number of revisions stored for this manifest."""
1120
1121
1121 def __iter__():
1122 def __iter__():
1122 """Iterate over revision numbers for this manifest."""
1123 """Iterate over revision numbers for this manifest."""
1123
1124
1124 def rev(node):
1125 def rev(node):
1125 """Obtain the revision number given a binary node.
1126 """Obtain the revision number given a binary node.
1126
1127
1127 Raises ``error.LookupError`` if the node is not known.
1128 Raises ``error.LookupError`` if the node is not known.
1128 """
1129 """
1129
1130
1130 def node(rev):
1131 def node(rev):
1131 """Obtain the node value given a revision number.
1132 """Obtain the node value given a revision number.
1132
1133
1133 Raises ``error.LookupError`` if the revision is not known.
1134 Raises ``error.LookupError`` if the revision is not known.
1134 """
1135 """
1135
1136
1136 def lookup(value):
1137 def lookup(value):
1137 """Attempt to resolve a value to a node.
1138 """Attempt to resolve a value to a node.
1138
1139
1139 Value can be a binary node, hex node, revision number, or a bytes
1140 Value can be a binary node, hex node, revision number, or a bytes
1140 that can be converted to an integer.
1141 that can be converted to an integer.
1141
1142
1142 Raises ``error.LookupError`` if a ndoe could not be resolved.
1143 Raises ``error.LookupError`` if a ndoe could not be resolved.
1143 """
1144 """
1144
1145
1145 def parents(node):
1146 def parents(node):
1146 """Returns a 2-tuple of parent nodes for a node.
1147 """Returns a 2-tuple of parent nodes for a node.
1147
1148
1148 Values will be ``nullid`` if the parent is empty.
1149 Values will be ``nullid`` if the parent is empty.
1149 """
1150 """
1150
1151
1151 def parentrevs(rev):
1152 def parentrevs(rev):
1152 """Like parents() but operates on revision numbers."""
1153 """Like parents() but operates on revision numbers."""
1153
1154
1154 def linkrev(rev):
1155 def linkrev(rev):
1155 """Obtain the changeset revision number a revision is linked to."""
1156 """Obtain the changeset revision number a revision is linked to."""
1156
1157
1157 def revision(node, _df=None, raw=False):
1158 def revision(node, _df=None, raw=False):
1158 """Obtain fulltext data for a node."""
1159 """Obtain fulltext data for a node."""
1159
1160
1160 def revdiff(rev1, rev2):
1161 def revdiff(rev1, rev2):
1161 """Obtain a delta between two revision numbers.
1162 """Obtain a delta between two revision numbers.
1162
1163
1163 The returned data is the result of ``bdiff.bdiff()`` on the raw
1164 The returned data is the result of ``bdiff.bdiff()`` on the raw
1164 revision data.
1165 revision data.
1165 """
1166 """
1166
1167
1167 def cmp(node, fulltext):
1168 def cmp(node, fulltext):
1168 """Compare fulltext to another revision.
1169 """Compare fulltext to another revision.
1169
1170
1170 Returns True if the fulltext is different from what is stored.
1171 Returns True if the fulltext is different from what is stored.
1171 """
1172 """
1172
1173
1173 def emitrevisions(nodes,
1174 def emitrevisions(nodes,
1174 nodesorder=None,
1175 nodesorder=None,
1175 revisiondata=False,
1176 revisiondata=False,
1176 assumehaveparentrevisions=False):
1177 assumehaveparentrevisions=False):
1177 """Produce ``irevisiondelta`` describing revisions.
1178 """Produce ``irevisiondelta`` describing revisions.
1178
1179
1179 See the documentation for ``ifiledata`` for more.
1180 See the documentation for ``ifiledata`` for more.
1180 """
1181 """
1181
1182
1182 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1183 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1183 """Process a series of deltas for storage.
1184 """Process a series of deltas for storage.
1184
1185
1185 See the documentation in ``ifilemutation`` for more.
1186 See the documentation in ``ifilemutation`` for more.
1186 """
1187 """
1187
1188
1188 def rawsize(rev):
1189 def rawsize(rev):
1189 """Obtain the size of tracked data.
1190 """Obtain the size of tracked data.
1190
1191
1191 Is equivalent to ``len(m.revision(node, raw=True))``.
1192 Is equivalent to ``len(m.revision(node, raw=True))``.
1192
1193
1193 TODO this method is only used by upgrade code and may be removed.
1194 TODO this method is only used by upgrade code and may be removed.
1194 """
1195 """
1195
1196
1196 def getstrippoint(minlink):
1197 def getstrippoint(minlink):
1197 """Find minimum revision that must be stripped to strip a linkrev.
1198 """Find minimum revision that must be stripped to strip a linkrev.
1198
1199
1199 See the documentation in ``ifilemutation`` for more.
1200 See the documentation in ``ifilemutation`` for more.
1200 """
1201 """
1201
1202
1202 def strip(minlink, transaction):
1203 def strip(minlink, transaction):
1203 """Remove storage of items starting at a linkrev.
1204 """Remove storage of items starting at a linkrev.
1204
1205
1205 See the documentation in ``ifilemutation`` for more.
1206 See the documentation in ``ifilemutation`` for more.
1206 """
1207 """
1207
1208
1208 def checksize():
1209 def checksize():
1209 """Obtain the expected sizes of backing files.
1210 """Obtain the expected sizes of backing files.
1210
1211
1211 TODO this is used by verify and it should not be part of the interface.
1212 TODO this is used by verify and it should not be part of the interface.
1212 """
1213 """
1213
1214
1214 def files():
1215 def files():
1215 """Obtain paths that are backing storage for this manifest.
1216 """Obtain paths that are backing storage for this manifest.
1216
1217
1217 TODO this is used by verify and there should probably be a better API
1218 TODO this is used by verify and there should probably be a better API
1218 for this functionality.
1219 for this functionality.
1219 """
1220 """
1220
1221
1221 def deltaparent(rev):
1222 def deltaparent(rev):
1222 """Obtain the revision that a revision is delta'd against.
1223 """Obtain the revision that a revision is delta'd against.
1223
1224
1224 TODO delta encoding is an implementation detail of storage and should
1225 TODO delta encoding is an implementation detail of storage and should
1225 not be exposed to the storage interface.
1226 not be exposed to the storage interface.
1226 """
1227 """
1227
1228
1228 def clone(tr, dest, **kwargs):
1229 def clone(tr, dest, **kwargs):
1229 """Clone this instance to another."""
1230 """Clone this instance to another."""
1230
1231
1231 def clearcaches(clear_persisted_data=False):
1232 def clearcaches(clear_persisted_data=False):
1232 """Clear any caches associated with this instance."""
1233 """Clear any caches associated with this instance."""
1233
1234
1234 def dirlog(d):
1235 def dirlog(d):
1235 """Obtain a manifest storage instance for a tree."""
1236 """Obtain a manifest storage instance for a tree."""
1236
1237
1237 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1238 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1238 match=None):
1239 match=None):
1239 """Add a revision to storage.
1240 """Add a revision to storage.
1240
1241
1241 ``m`` is an object conforming to ``imanifestdict``.
1242 ``m`` is an object conforming to ``imanifestdict``.
1242
1243
1243 ``link`` is the linkrev revision number.
1244 ``link`` is the linkrev revision number.
1244
1245
1245 ``p1`` and ``p2`` are the parent revision numbers.
1246 ``p1`` and ``p2`` are the parent revision numbers.
1246
1247
1247 ``added`` and ``removed`` are iterables of added and removed paths,
1248 ``added`` and ``removed`` are iterables of added and removed paths,
1248 respectively.
1249 respectively.
1249
1250
1250 ``readtree`` is a function that can be used to read the child tree(s)
1251 ``readtree`` is a function that can be used to read the child tree(s)
1251 when recursively writing the full tree structure when using
1252 when recursively writing the full tree structure when using
1252 treemanifets.
1253 treemanifets.
1253
1254
1254 ``match`` is a matcher that can be used to hint to storage that not all
1255 ``match`` is a matcher that can be used to hint to storage that not all
1255 paths must be inspected; this is an optimization and can be safely
1256 paths must be inspected; this is an optimization and can be safely
1256 ignored. Note that the storage must still be able to reproduce a full
1257 ignored. Note that the storage must still be able to reproduce a full
1257 manifest including files that did not match.
1258 manifest including files that did not match.
1258 """
1259 """
1259
1260
1260 def storageinfo(exclusivefiles=False, sharedfiles=False,
1261 def storageinfo(exclusivefiles=False, sharedfiles=False,
1261 revisionscount=False, trackedsize=False,
1262 revisionscount=False, trackedsize=False,
1262 storedsize=False):
1263 storedsize=False):
1263 """Obtain information about storage for this manifest's data.
1264 """Obtain information about storage for this manifest's data.
1264
1265
1265 See ``ifilestorage.storageinfo()`` for a description of this method.
1266 See ``ifilestorage.storageinfo()`` for a description of this method.
1266 This one behaves the same way, except for manifest data.
1267 This one behaves the same way, except for manifest data.
1267 """
1268 """
1268
1269
1269 class imanifestlog(interfaceutil.Interface):
1270 class imanifestlog(interfaceutil.Interface):
1270 """Interface representing a collection of manifest snapshots.
1271 """Interface representing a collection of manifest snapshots.
1271
1272
1272 Represents the root manifest in a repository.
1273 Represents the root manifest in a repository.
1273
1274
1274 Also serves as a means to access nested tree manifests and to cache
1275 Also serves as a means to access nested tree manifests and to cache
1275 tree manifests.
1276 tree manifests.
1276 """
1277 """
1277
1278
1278 def __getitem__(node):
1279 def __getitem__(node):
1279 """Obtain a manifest instance for a given binary node.
1280 """Obtain a manifest instance for a given binary node.
1280
1281
1281 Equivalent to calling ``self.get('', node)``.
1282 Equivalent to calling ``self.get('', node)``.
1282
1283
1283 The returned object conforms to the ``imanifestrevisionstored``
1284 The returned object conforms to the ``imanifestrevisionstored``
1284 interface.
1285 interface.
1285 """
1286 """
1286
1287
1287 def get(tree, node, verify=True):
1288 def get(tree, node, verify=True):
1288 """Retrieve the manifest instance for a given directory and binary node.
1289 """Retrieve the manifest instance for a given directory and binary node.
1289
1290
1290 ``node`` always refers to the node of the root manifest (which will be
1291 ``node`` always refers to the node of the root manifest (which will be
1291 the only manifest if flat manifests are being used).
1292 the only manifest if flat manifests are being used).
1292
1293
1293 If ``tree`` is the empty string, the root manifest is returned.
1294 If ``tree`` is the empty string, the root manifest is returned.
1294 Otherwise the manifest for the specified directory will be returned
1295 Otherwise the manifest for the specified directory will be returned
1295 (requires tree manifests).
1296 (requires tree manifests).
1296
1297
1297 If ``verify`` is True, ``LookupError`` is raised if the node is not
1298 If ``verify`` is True, ``LookupError`` is raised if the node is not
1298 known.
1299 known.
1299
1300
1300 The returned object conforms to the ``imanifestrevisionstored``
1301 The returned object conforms to the ``imanifestrevisionstored``
1301 interface.
1302 interface.
1302 """
1303 """
1303
1304
1304 def getstorage(tree):
1305 def getstorage(tree):
1305 """Retrieve an interface to storage for a particular tree.
1306 """Retrieve an interface to storage for a particular tree.
1306
1307
1307 If ``tree`` is the empty bytestring, storage for the root manifest will
1308 If ``tree`` is the empty bytestring, storage for the root manifest will
1308 be returned. Otherwise storage for a tree manifest is returned.
1309 be returned. Otherwise storage for a tree manifest is returned.
1309
1310
1310 TODO formalize interface for returned object.
1311 TODO formalize interface for returned object.
1311 """
1312 """
1312
1313
1313 def clearcaches():
1314 def clearcaches():
1314 """Clear caches associated with this collection."""
1315 """Clear caches associated with this collection."""
1315
1316
1316 def rev(node):
1317 def rev(node):
1317 """Obtain the revision number for a binary node.
1318 """Obtain the revision number for a binary node.
1318
1319
1319 Raises ``error.LookupError`` if the node is not known.
1320 Raises ``error.LookupError`` if the node is not known.
1320 """
1321 """
1321
1322
1322 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1323 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1323 """Local repository sub-interface providing access to tracked file storage.
1324 """Local repository sub-interface providing access to tracked file storage.
1324
1325
1325 This interface defines how a repository accesses storage for a single
1326 This interface defines how a repository accesses storage for a single
1326 tracked file path.
1327 tracked file path.
1327 """
1328 """
1328
1329
1329 def file(f):
1330 def file(f):
1330 """Obtain a filelog for a tracked path.
1331 """Obtain a filelog for a tracked path.
1331
1332
1332 The returned type conforms to the ``ifilestorage`` interface.
1333 The returned type conforms to the ``ifilestorage`` interface.
1333 """
1334 """
1334
1335
1335 class ilocalrepositorymain(interfaceutil.Interface):
1336 class ilocalrepositorymain(interfaceutil.Interface):
1336 """Main interface for local repositories.
1337 """Main interface for local repositories.
1337
1338
1338 This currently captures the reality of things - not how things should be.
1339 This currently captures the reality of things - not how things should be.
1339 """
1340 """
1340
1341
1341 supportedformats = interfaceutil.Attribute(
1342 supportedformats = interfaceutil.Attribute(
1342 """Set of requirements that apply to stream clone.
1343 """Set of requirements that apply to stream clone.
1343
1344
1344 This is actually a class attribute and is shared among all instances.
1345 This is actually a class attribute and is shared among all instances.
1345 """)
1346 """)
1346
1347
1347 supported = interfaceutil.Attribute(
1348 supported = interfaceutil.Attribute(
1348 """Set of requirements that this repo is capable of opening.""")
1349 """Set of requirements that this repo is capable of opening.""")
1349
1350
1350 requirements = interfaceutil.Attribute(
1351 requirements = interfaceutil.Attribute(
1351 """Set of requirements this repo uses.""")
1352 """Set of requirements this repo uses.""")
1352
1353
1353 features = interfaceutil.Attribute(
1354 features = interfaceutil.Attribute(
1354 """Set of "features" this repository supports.
1355 """Set of "features" this repository supports.
1355
1356
1356 A "feature" is a loosely-defined term. It can refer to a feature
1357 A "feature" is a loosely-defined term. It can refer to a feature
1357 in the classical sense or can describe an implementation detail
1358 in the classical sense or can describe an implementation detail
1358 of the repository. For example, a ``readonly`` feature may denote
1359 of the repository. For example, a ``readonly`` feature may denote
1359 the repository as read-only. Or a ``revlogfilestore`` feature may
1360 the repository as read-only. Or a ``revlogfilestore`` feature may
1360 denote that the repository is using revlogs for file storage.
1361 denote that the repository is using revlogs for file storage.
1361
1362
1362 The intent of features is to provide a machine-queryable mechanism
1363 The intent of features is to provide a machine-queryable mechanism
1363 for repo consumers to test for various repository characteristics.
1364 for repo consumers to test for various repository characteristics.
1364
1365
1365 Features are similar to ``requirements``. The main difference is that
1366 Features are similar to ``requirements``. The main difference is that
1366 requirements are stored on-disk and represent requirements to open the
1367 requirements are stored on-disk and represent requirements to open the
1367 repository. Features are more run-time capabilities of the repository
1368 repository. Features are more run-time capabilities of the repository
1368 and more granular capabilities (which may be derived from requirements).
1369 and more granular capabilities (which may be derived from requirements).
1369 """)
1370 """)
1370
1371
1371 filtername = interfaceutil.Attribute(
1372 filtername = interfaceutil.Attribute(
1372 """Name of the repoview that is active on this repo.""")
1373 """Name of the repoview that is active on this repo.""")
1373
1374
1374 wvfs = interfaceutil.Attribute(
1375 wvfs = interfaceutil.Attribute(
1375 """VFS used to access the working directory.""")
1376 """VFS used to access the working directory.""")
1376
1377
1377 vfs = interfaceutil.Attribute(
1378 vfs = interfaceutil.Attribute(
1378 """VFS rooted at the .hg directory.
1379 """VFS rooted at the .hg directory.
1379
1380
1380 Used to access repository data not in the store.
1381 Used to access repository data not in the store.
1381 """)
1382 """)
1382
1383
1383 svfs = interfaceutil.Attribute(
1384 svfs = interfaceutil.Attribute(
1384 """VFS rooted at the store.
1385 """VFS rooted at the store.
1385
1386
1386 Used to access repository data in the store. Typically .hg/store.
1387 Used to access repository data in the store. Typically .hg/store.
1387 But can point elsewhere if the store is shared.
1388 But can point elsewhere if the store is shared.
1388 """)
1389 """)
1389
1390
1390 root = interfaceutil.Attribute(
1391 root = interfaceutil.Attribute(
1391 """Path to the root of the working directory.""")
1392 """Path to the root of the working directory.""")
1392
1393
1393 path = interfaceutil.Attribute(
1394 path = interfaceutil.Attribute(
1394 """Path to the .hg directory.""")
1395 """Path to the .hg directory.""")
1395
1396
1396 origroot = interfaceutil.Attribute(
1397 origroot = interfaceutil.Attribute(
1397 """The filesystem path that was used to construct the repo.""")
1398 """The filesystem path that was used to construct the repo.""")
1398
1399
1399 auditor = interfaceutil.Attribute(
1400 auditor = interfaceutil.Attribute(
1400 """A pathauditor for the working directory.
1401 """A pathauditor for the working directory.
1401
1402
1402 This checks if a path refers to a nested repository.
1403 This checks if a path refers to a nested repository.
1403
1404
1404 Operates on the filesystem.
1405 Operates on the filesystem.
1405 """)
1406 """)
1406
1407
1407 nofsauditor = interfaceutil.Attribute(
1408 nofsauditor = interfaceutil.Attribute(
1408 """A pathauditor for the working directory.
1409 """A pathauditor for the working directory.
1409
1410
1410 This is like ``auditor`` except it doesn't do filesystem checks.
1411 This is like ``auditor`` except it doesn't do filesystem checks.
1411 """)
1412 """)
1412
1413
1413 baseui = interfaceutil.Attribute(
1414 baseui = interfaceutil.Attribute(
1414 """Original ui instance passed into constructor.""")
1415 """Original ui instance passed into constructor.""")
1415
1416
1416 ui = interfaceutil.Attribute(
1417 ui = interfaceutil.Attribute(
1417 """Main ui instance for this instance.""")
1418 """Main ui instance for this instance.""")
1418
1419
1419 sharedpath = interfaceutil.Attribute(
1420 sharedpath = interfaceutil.Attribute(
1420 """Path to the .hg directory of the repo this repo was shared from.""")
1421 """Path to the .hg directory of the repo this repo was shared from.""")
1421
1422
1422 store = interfaceutil.Attribute(
1423 store = interfaceutil.Attribute(
1423 """A store instance.""")
1424 """A store instance.""")
1424
1425
1425 spath = interfaceutil.Attribute(
1426 spath = interfaceutil.Attribute(
1426 """Path to the store.""")
1427 """Path to the store.""")
1427
1428
1428 sjoin = interfaceutil.Attribute(
1429 sjoin = interfaceutil.Attribute(
1429 """Alias to self.store.join.""")
1430 """Alias to self.store.join.""")
1430
1431
1431 cachevfs = interfaceutil.Attribute(
1432 cachevfs = interfaceutil.Attribute(
1432 """A VFS used to access the cache directory.
1433 """A VFS used to access the cache directory.
1433
1434
1434 Typically .hg/cache.
1435 Typically .hg/cache.
1435 """)
1436 """)
1436
1437
1437 filteredrevcache = interfaceutil.Attribute(
1438 filteredrevcache = interfaceutil.Attribute(
1438 """Holds sets of revisions to be filtered.""")
1439 """Holds sets of revisions to be filtered.""")
1439
1440
1440 names = interfaceutil.Attribute(
1441 names = interfaceutil.Attribute(
1441 """A ``namespaces`` instance.""")
1442 """A ``namespaces`` instance.""")
1442
1443
1443 def close():
1444 def close():
1444 """Close the handle on this repository."""
1445 """Close the handle on this repository."""
1445
1446
1446 def peer():
1447 def peer():
1447 """Obtain an object conforming to the ``peer`` interface."""
1448 """Obtain an object conforming to the ``peer`` interface."""
1448
1449
1449 def unfiltered():
1450 def unfiltered():
1450 """Obtain an unfiltered/raw view of this repo."""
1451 """Obtain an unfiltered/raw view of this repo."""
1451
1452
1452 def filtered(name, visibilityexceptions=None):
1453 def filtered(name, visibilityexceptions=None):
1453 """Obtain a named view of this repository."""
1454 """Obtain a named view of this repository."""
1454
1455
1455 obsstore = interfaceutil.Attribute(
1456 obsstore = interfaceutil.Attribute(
1456 """A store of obsolescence data.""")
1457 """A store of obsolescence data.""")
1457
1458
1458 changelog = interfaceutil.Attribute(
1459 changelog = interfaceutil.Attribute(
1459 """A handle on the changelog revlog.""")
1460 """A handle on the changelog revlog.""")
1460
1461
1461 manifestlog = interfaceutil.Attribute(
1462 manifestlog = interfaceutil.Attribute(
1462 """An instance conforming to the ``imanifestlog`` interface.
1463 """An instance conforming to the ``imanifestlog`` interface.
1463
1464
1464 Provides access to manifests for the repository.
1465 Provides access to manifests for the repository.
1465 """)
1466 """)
1466
1467
1467 dirstate = interfaceutil.Attribute(
1468 dirstate = interfaceutil.Attribute(
1468 """Working directory state.""")
1469 """Working directory state.""")
1469
1470
1470 narrowpats = interfaceutil.Attribute(
1471 narrowpats = interfaceutil.Attribute(
1471 """Matcher patterns for this repository's narrowspec.""")
1472 """Matcher patterns for this repository's narrowspec.""")
1472
1473
1473 def narrowmatch():
1474 def narrowmatch():
1474 """Obtain a matcher for the narrowspec."""
1475 """Obtain a matcher for the narrowspec."""
1475
1476
1476 def setnarrowpats(newincludes, newexcludes):
1477 def setnarrowpats(newincludes, newexcludes):
1477 """Define the narrowspec for this repository."""
1478 """Define the narrowspec for this repository."""
1478
1479
1479 def __getitem__(changeid):
1480 def __getitem__(changeid):
1480 """Try to resolve a changectx."""
1481 """Try to resolve a changectx."""
1481
1482
1482 def __contains__(changeid):
1483 def __contains__(changeid):
1483 """Whether a changeset exists."""
1484 """Whether a changeset exists."""
1484
1485
1485 def __nonzero__():
1486 def __nonzero__():
1486 """Always returns True."""
1487 """Always returns True."""
1487 return True
1488 return True
1488
1489
1489 __bool__ = __nonzero__
1490 __bool__ = __nonzero__
1490
1491
1491 def __len__():
1492 def __len__():
1492 """Returns the number of changesets in the repo."""
1493 """Returns the number of changesets in the repo."""
1493
1494
1494 def __iter__():
1495 def __iter__():
1495 """Iterate over revisions in the changelog."""
1496 """Iterate over revisions in the changelog."""
1496
1497
1497 def revs(expr, *args):
1498 def revs(expr, *args):
1498 """Evaluate a revset.
1499 """Evaluate a revset.
1499
1500
1500 Emits revisions.
1501 Emits revisions.
1501 """
1502 """
1502
1503
1503 def set(expr, *args):
1504 def set(expr, *args):
1504 """Evaluate a revset.
1505 """Evaluate a revset.
1505
1506
1506 Emits changectx instances.
1507 Emits changectx instances.
1507 """
1508 """
1508
1509
1509 def anyrevs(specs, user=False, localalias=None):
1510 def anyrevs(specs, user=False, localalias=None):
1510 """Find revisions matching one of the given revsets."""
1511 """Find revisions matching one of the given revsets."""
1511
1512
1512 def url():
1513 def url():
1513 """Returns a string representing the location of this repo."""
1514 """Returns a string representing the location of this repo."""
1514
1515
1515 def hook(name, throw=False, **args):
1516 def hook(name, throw=False, **args):
1516 """Call a hook."""
1517 """Call a hook."""
1517
1518
1518 def tags():
1519 def tags():
1519 """Return a mapping of tag to node."""
1520 """Return a mapping of tag to node."""
1520
1521
1521 def tagtype(tagname):
1522 def tagtype(tagname):
1522 """Return the type of a given tag."""
1523 """Return the type of a given tag."""
1523
1524
1524 def tagslist():
1525 def tagslist():
1525 """Return a list of tags ordered by revision."""
1526 """Return a list of tags ordered by revision."""
1526
1527
1527 def nodetags(node):
1528 def nodetags(node):
1528 """Return the tags associated with a node."""
1529 """Return the tags associated with a node."""
1529
1530
1530 def nodebookmarks(node):
1531 def nodebookmarks(node):
1531 """Return the list of bookmarks pointing to the specified node."""
1532 """Return the list of bookmarks pointing to the specified node."""
1532
1533
1533 def branchmap():
1534 def branchmap():
1534 """Return a mapping of branch to heads in that branch."""
1535 """Return a mapping of branch to heads in that branch."""
1535
1536
1536 def revbranchcache():
1537 def revbranchcache():
1537 pass
1538 pass
1538
1539
1539 def branchtip(branchtip, ignoremissing=False):
1540 def branchtip(branchtip, ignoremissing=False):
1540 """Return the tip node for a given branch."""
1541 """Return the tip node for a given branch."""
1541
1542
1542 def lookup(key):
1543 def lookup(key):
1543 """Resolve the node for a revision."""
1544 """Resolve the node for a revision."""
1544
1545
1545 def lookupbranch(key):
1546 def lookupbranch(key):
1546 """Look up the branch name of the given revision or branch name."""
1547 """Look up the branch name of the given revision or branch name."""
1547
1548
1548 def known(nodes):
1549 def known(nodes):
1549 """Determine whether a series of nodes is known.
1550 """Determine whether a series of nodes is known.
1550
1551
1551 Returns a list of bools.
1552 Returns a list of bools.
1552 """
1553 """
1553
1554
1554 def local():
1555 def local():
1555 """Whether the repository is local."""
1556 """Whether the repository is local."""
1556 return True
1557 return True
1557
1558
1558 def publishing():
1559 def publishing():
1559 """Whether the repository is a publishing repository."""
1560 """Whether the repository is a publishing repository."""
1560
1561
1561 def cancopy():
1562 def cancopy():
1562 pass
1563 pass
1563
1564
1564 def shared():
1565 def shared():
1565 """The type of shared repository or None."""
1566 """The type of shared repository or None."""
1566
1567
1567 def wjoin(f, *insidef):
1568 def wjoin(f, *insidef):
1568 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1569 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1569
1570
1570 def setparents(p1, p2):
1571 def setparents(p1, p2):
1571 """Set the parent nodes of the working directory."""
1572 """Set the parent nodes of the working directory."""
1572
1573
1573 def filectx(path, changeid=None, fileid=None):
1574 def filectx(path, changeid=None, fileid=None):
1574 """Obtain a filectx for the given file revision."""
1575 """Obtain a filectx for the given file revision."""
1575
1576
1576 def getcwd():
1577 def getcwd():
1577 """Obtain the current working directory from the dirstate."""
1578 """Obtain the current working directory from the dirstate."""
1578
1579
1579 def pathto(f, cwd=None):
1580 def pathto(f, cwd=None):
1580 """Obtain the relative path to a file."""
1581 """Obtain the relative path to a file."""
1581
1582
1582 def adddatafilter(name, fltr):
1583 def adddatafilter(name, fltr):
1583 pass
1584 pass
1584
1585
1585 def wread(filename):
1586 def wread(filename):
1586 """Read a file from wvfs, using data filters."""
1587 """Read a file from wvfs, using data filters."""
1587
1588
1588 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1589 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1589 """Write data to a file in the wvfs, using data filters."""
1590 """Write data to a file in the wvfs, using data filters."""
1590
1591
1591 def wwritedata(filename, data):
1592 def wwritedata(filename, data):
1592 """Resolve data for writing to the wvfs, using data filters."""
1593 """Resolve data for writing to the wvfs, using data filters."""
1593
1594
1594 def currenttransaction():
1595 def currenttransaction():
1595 """Obtain the current transaction instance or None."""
1596 """Obtain the current transaction instance or None."""
1596
1597
1597 def transaction(desc, report=None):
1598 def transaction(desc, report=None):
1598 """Open a new transaction to write to the repository."""
1599 """Open a new transaction to write to the repository."""
1599
1600
1600 def undofiles():
1601 def undofiles():
1601 """Returns a list of (vfs, path) for files to undo transactions."""
1602 """Returns a list of (vfs, path) for files to undo transactions."""
1602
1603
1603 def recover():
1604 def recover():
1604 """Roll back an interrupted transaction."""
1605 """Roll back an interrupted transaction."""
1605
1606
1606 def rollback(dryrun=False, force=False):
1607 def rollback(dryrun=False, force=False):
1607 """Undo the last transaction.
1608 """Undo the last transaction.
1608
1609
1609 DANGEROUS.
1610 DANGEROUS.
1610 """
1611 """
1611
1612
1612 def updatecaches(tr=None, full=False):
1613 def updatecaches(tr=None, full=False):
1613 """Warm repo caches."""
1614 """Warm repo caches."""
1614
1615
1615 def invalidatecaches():
1616 def invalidatecaches():
1616 """Invalidate cached data due to the repository mutating."""
1617 """Invalidate cached data due to the repository mutating."""
1617
1618
1618 def invalidatevolatilesets():
1619 def invalidatevolatilesets():
1619 pass
1620 pass
1620
1621
1621 def invalidatedirstate():
1622 def invalidatedirstate():
1622 """Invalidate the dirstate."""
1623 """Invalidate the dirstate."""
1623
1624
1624 def invalidate(clearfilecache=False):
1625 def invalidate(clearfilecache=False):
1625 pass
1626 pass
1626
1627
1627 def invalidateall():
1628 def invalidateall():
1628 pass
1629 pass
1629
1630
1630 def lock(wait=True):
1631 def lock(wait=True):
1631 """Lock the repository store and return a lock instance."""
1632 """Lock the repository store and return a lock instance."""
1632
1633
1633 def wlock(wait=True):
1634 def wlock(wait=True):
1634 """Lock the non-store parts of the repository."""
1635 """Lock the non-store parts of the repository."""
1635
1636
1636 def currentwlock():
1637 def currentwlock():
1637 """Return the wlock if it's held or None."""
1638 """Return the wlock if it's held or None."""
1638
1639
1639 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1640 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1640 pass
1641 pass
1641
1642
1642 def commit(text='', user=None, date=None, match=None, force=False,
1643 def commit(text='', user=None, date=None, match=None, force=False,
1643 editor=False, extra=None):
1644 editor=False, extra=None):
1644 """Add a new revision to the repository."""
1645 """Add a new revision to the repository."""
1645
1646
1646 def commitctx(ctx, error=False):
1647 def commitctx(ctx, error=False):
1647 """Commit a commitctx instance to the repository."""
1648 """Commit a commitctx instance to the repository."""
1648
1649
1649 def destroying():
1650 def destroying():
1650 """Inform the repository that nodes are about to be destroyed."""
1651 """Inform the repository that nodes are about to be destroyed."""
1651
1652
1652 def destroyed():
1653 def destroyed():
1653 """Inform the repository that nodes have been destroyed."""
1654 """Inform the repository that nodes have been destroyed."""
1654
1655
1655 def status(node1='.', node2=None, match=None, ignored=False,
1656 def status(node1='.', node2=None, match=None, ignored=False,
1656 clean=False, unknown=False, listsubrepos=False):
1657 clean=False, unknown=False, listsubrepos=False):
1657 """Convenience method to call repo[x].status()."""
1658 """Convenience method to call repo[x].status()."""
1658
1659
1659 def addpostdsstatus(ps):
1660 def addpostdsstatus(ps):
1660 pass
1661 pass
1661
1662
1662 def postdsstatus():
1663 def postdsstatus():
1663 pass
1664 pass
1664
1665
1665 def clearpostdsstatus():
1666 def clearpostdsstatus():
1666 pass
1667 pass
1667
1668
1668 def heads(start=None):
1669 def heads(start=None):
1669 """Obtain list of nodes that are DAG heads."""
1670 """Obtain list of nodes that are DAG heads."""
1670
1671
1671 def branchheads(branch=None, start=None, closed=False):
1672 def branchheads(branch=None, start=None, closed=False):
1672 pass
1673 pass
1673
1674
1674 def branches(nodes):
1675 def branches(nodes):
1675 pass
1676 pass
1676
1677
1677 def between(pairs):
1678 def between(pairs):
1678 pass
1679 pass
1679
1680
1680 def checkpush(pushop):
1681 def checkpush(pushop):
1681 pass
1682 pass
1682
1683
1683 prepushoutgoinghooks = interfaceutil.Attribute(
1684 prepushoutgoinghooks = interfaceutil.Attribute(
1684 """util.hooks instance.""")
1685 """util.hooks instance.""")
1685
1686
1686 def pushkey(namespace, key, old, new):
1687 def pushkey(namespace, key, old, new):
1687 pass
1688 pass
1688
1689
1689 def listkeys(namespace):
1690 def listkeys(namespace):
1690 pass
1691 pass
1691
1692
1692 def debugwireargs(one, two, three=None, four=None, five=None):
1693 def debugwireargs(one, two, three=None, four=None, five=None):
1693 pass
1694 pass
1694
1695
1695 def savecommitmessage(text):
1696 def savecommitmessage(text):
1696 pass
1697 pass
1697
1698
1698 class completelocalrepository(ilocalrepositorymain,
1699 class completelocalrepository(ilocalrepositorymain,
1699 ilocalrepositoryfilestorage):
1700 ilocalrepositoryfilestorage):
1700 """Complete interface for a local repository."""
1701 """Complete interface for a local repository."""
1701
1702
1702 class iwireprotocolcommandcacher(interfaceutil.Interface):
1703 class iwireprotocolcommandcacher(interfaceutil.Interface):
1703 """Represents a caching backend for wire protocol commands.
1704 """Represents a caching backend for wire protocol commands.
1704
1705
1705 Wire protocol version 2 supports transparent caching of many commands.
1706 Wire protocol version 2 supports transparent caching of many commands.
1706 To leverage this caching, servers can activate objects that cache
1707 To leverage this caching, servers can activate objects that cache
1707 command responses. Objects handle both cache writing and reading.
1708 command responses. Objects handle both cache writing and reading.
1708 This interface defines how that response caching mechanism works.
1709 This interface defines how that response caching mechanism works.
1709
1710
1710 Wire protocol version 2 commands emit a series of objects that are
1711 Wire protocol version 2 commands emit a series of objects that are
1711 serialized and sent to the client. The caching layer exists between
1712 serialized and sent to the client. The caching layer exists between
1712 the invocation of the command function and the sending of its output
1713 the invocation of the command function and the sending of its output
1713 objects to an output layer.
1714 objects to an output layer.
1714
1715
1715 Instances of this interface represent a binding to a cache that
1716 Instances of this interface represent a binding to a cache that
1716 can serve a response (in place of calling a command function) and/or
1717 can serve a response (in place of calling a command function) and/or
1717 write responses to a cache for subsequent use.
1718 write responses to a cache for subsequent use.
1718
1719
1719 When a command request arrives, the following happens with regards
1720 When a command request arrives, the following happens with regards
1720 to this interface:
1721 to this interface:
1721
1722
1722 1. The server determines whether the command request is cacheable.
1723 1. The server determines whether the command request is cacheable.
1723 2. If it is, an instance of this interface is spawned.
1724 2. If it is, an instance of this interface is spawned.
1724 3. The cacher is activated in a context manager (``__enter__`` is called).
1725 3. The cacher is activated in a context manager (``__enter__`` is called).
1725 4. A cache *key* for that request is derived. This will call the
1726 4. A cache *key* for that request is derived. This will call the
1726 instance's ``adjustcachekeystate()`` method so the derivation
1727 instance's ``adjustcachekeystate()`` method so the derivation
1727 can be influenced.
1728 can be influenced.
1728 5. The cacher is informed of the derived cache key via a call to
1729 5. The cacher is informed of the derived cache key via a call to
1729 ``setcachekey()``.
1730 ``setcachekey()``.
1730 6. The cacher's ``lookup()`` method is called to test for presence of
1731 6. The cacher's ``lookup()`` method is called to test for presence of
1731 the derived key in the cache.
1732 the derived key in the cache.
1732 7. If ``lookup()`` returns a hit, that cached result is used in place
1733 7. If ``lookup()`` returns a hit, that cached result is used in place
1733 of invoking the command function. ``__exit__`` is called and the instance
1734 of invoking the command function. ``__exit__`` is called and the instance
1734 is discarded.
1735 is discarded.
1735 8. The command function is invoked.
1736 8. The command function is invoked.
1736 9. ``onobject()`` is called for each object emitted by the command
1737 9. ``onobject()`` is called for each object emitted by the command
1737 function.
1738 function.
1738 10. After the final object is seen, ``onfinished()`` is called.
1739 10. After the final object is seen, ``onfinished()`` is called.
1739 11. ``__exit__`` is called to signal the end of use of the instance.
1740 11. ``__exit__`` is called to signal the end of use of the instance.
1740
1741
1741 Cache *key* derivation can be influenced by the instance.
1742 Cache *key* derivation can be influenced by the instance.
1742
1743
1743 Cache keys are initially derived by a deterministic representation of
1744 Cache keys are initially derived by a deterministic representation of
1744 the command request. This includes the command name, arguments, protocol
1745 the command request. This includes the command name, arguments, protocol
1745 version, etc. This initial key derivation is performed by CBOR-encoding a
1746 version, etc. This initial key derivation is performed by CBOR-encoding a
1746 data structure and feeding that output into a hasher.
1747 data structure and feeding that output into a hasher.
1747
1748
1748 Instances of this interface can influence this initial key derivation
1749 Instances of this interface can influence this initial key derivation
1749 via ``adjustcachekeystate()``.
1750 via ``adjustcachekeystate()``.
1750
1751
1751 The instance is informed of the derived cache key via a call to
1752 The instance is informed of the derived cache key via a call to
1752 ``setcachekey()``. The instance must store the key locally so it can
1753 ``setcachekey()``. The instance must store the key locally so it can
1753 be consulted on subsequent operations that may require it.
1754 be consulted on subsequent operations that may require it.
1754
1755
1755 When constructed, the instance has access to a callable that can be used
1756 When constructed, the instance has access to a callable that can be used
1756 for encoding response objects. This callable receives as its single
1757 for encoding response objects. This callable receives as its single
1757 argument an object emitted by a command function. It returns an iterable
1758 argument an object emitted by a command function. It returns an iterable
1758 of bytes chunks representing the encoded object. Unless the cacher is
1759 of bytes chunks representing the encoded object. Unless the cacher is
1759 caching native Python objects in memory or has a way of reconstructing
1760 caching native Python objects in memory or has a way of reconstructing
1760 the original Python objects, implementations typically call this function
1761 the original Python objects, implementations typically call this function
1761 to produce bytes from the output objects and then store those bytes in
1762 to produce bytes from the output objects and then store those bytes in
1762 the cache. When it comes time to re-emit those bytes, they are wrapped
1763 the cache. When it comes time to re-emit those bytes, they are wrapped
1763 in a ``wireprototypes.encodedresponse`` instance to tell the output
1764 in a ``wireprototypes.encodedresponse`` instance to tell the output
1764 layer that they are pre-encoded.
1765 layer that they are pre-encoded.
1765
1766
1766 When receiving the objects emitted by the command function, instances
1767 When receiving the objects emitted by the command function, instances
1767 can choose what to do with those objects. The simplest thing to do is
1768 can choose what to do with those objects. The simplest thing to do is
1768 re-emit the original objects. They will be forwarded to the output
1769 re-emit the original objects. They will be forwarded to the output
1769 layer and will be processed as if the cacher did not exist.
1770 layer and will be processed as if the cacher did not exist.
1770
1771
1771 Implementations could also choose to not emit objects - instead locally
1772 Implementations could also choose to not emit objects - instead locally
1772 buffering objects or their encoded representation. They could then emit
1773 buffering objects or their encoded representation. They could then emit
1773 a single "coalesced" object when ``onfinished()`` is called. In
1774 a single "coalesced" object when ``onfinished()`` is called. In
1774 this way, the implementation would function as a filtering layer of
1775 this way, the implementation would function as a filtering layer of
1775 sorts.
1776 sorts.
1776
1777
1777 When caching objects, typically the encoded form of the object will
1778 When caching objects, typically the encoded form of the object will
1778 be stored. Keep in mind that if the original object is forwarded to
1779 be stored. Keep in mind that if the original object is forwarded to
1779 the output layer, it will need to be encoded there as well. For large
1780 the output layer, it will need to be encoded there as well. For large
1780 output, this redundant encoding could add overhead. Implementations
1781 output, this redundant encoding could add overhead. Implementations
1781 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1782 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1782 instances to avoid this overhead.
1783 instances to avoid this overhead.
1783 """
1784 """
1784 def __enter__():
1785 def __enter__():
1785 """Marks the instance as active.
1786 """Marks the instance as active.
1786
1787
1787 Should return self.
1788 Should return self.
1788 """
1789 """
1789
1790
1790 def __exit__(exctype, excvalue, exctb):
1791 def __exit__(exctype, excvalue, exctb):
1791 """Called when cacher is no longer used.
1792 """Called when cacher is no longer used.
1792
1793
1793 This can be used by implementations to perform cleanup actions (e.g.
1794 This can be used by implementations to perform cleanup actions (e.g.
1794 disconnecting network sockets, aborting a partially cached response.
1795 disconnecting network sockets, aborting a partially cached response.
1795 """
1796 """
1796
1797
1797 def adjustcachekeystate(state):
1798 def adjustcachekeystate(state):
1798 """Influences cache key derivation by adjusting state to derive key.
1799 """Influences cache key derivation by adjusting state to derive key.
1799
1800
1800 A dict defining the state used to derive the cache key is passed.
1801 A dict defining the state used to derive the cache key is passed.
1801
1802
1802 Implementations can modify this dict to record additional state that
1803 Implementations can modify this dict to record additional state that
1803 is wanted to influence key derivation.
1804 is wanted to influence key derivation.
1804
1805
1805 Implementations are *highly* encouraged to not modify or delete
1806 Implementations are *highly* encouraged to not modify or delete
1806 existing keys.
1807 existing keys.
1807 """
1808 """
1808
1809
1809 def setcachekey(key):
1810 def setcachekey(key):
1810 """Record the derived cache key for this request.
1811 """Record the derived cache key for this request.
1811
1812
1812 Instances may mutate the key for internal usage, as desired. e.g.
1813 Instances may mutate the key for internal usage, as desired. e.g.
1813 instances may wish to prepend the repo name, introduce path
1814 instances may wish to prepend the repo name, introduce path
1814 components for filesystem or URL addressing, etc. Behavior is up to
1815 components for filesystem or URL addressing, etc. Behavior is up to
1815 the cache.
1816 the cache.
1816
1817
1817 Returns a bool indicating if the request is cacheable by this
1818 Returns a bool indicating if the request is cacheable by this
1818 instance.
1819 instance.
1819 """
1820 """
1820
1821
1821 def lookup():
1822 def lookup():
1822 """Attempt to resolve an entry in the cache.
1823 """Attempt to resolve an entry in the cache.
1823
1824
1824 The instance is instructed to look for the cache key that it was
1825 The instance is instructed to look for the cache key that it was
1825 informed about via the call to ``setcachekey()``.
1826 informed about via the call to ``setcachekey()``.
1826
1827
1827 If there's no cache hit or the cacher doesn't wish to use the cached
1828 If there's no cache hit or the cacher doesn't wish to use the cached
1828 entry, ``None`` should be returned.
1829 entry, ``None`` should be returned.
1829
1830
1830 Else, a dict defining the cached result should be returned. The
1831 Else, a dict defining the cached result should be returned. The
1831 dict may have the following keys:
1832 dict may have the following keys:
1832
1833
1833 objs
1834 objs
1834 An iterable of objects that should be sent to the client. That
1835 An iterable of objects that should be sent to the client. That
1835 iterable of objects is expected to be what the command function
1836 iterable of objects is expected to be what the command function
1836 would return if invoked or an equivalent representation thereof.
1837 would return if invoked or an equivalent representation thereof.
1837 """
1838 """
1838
1839
1839 def onobject(obj):
1840 def onobject(obj):
1840 """Called when a new object is emitted from the command function.
1841 """Called when a new object is emitted from the command function.
1841
1842
1842 Receives as its argument the object that was emitted from the
1843 Receives as its argument the object that was emitted from the
1843 command function.
1844 command function.
1844
1845
1845 This method returns an iterator of objects to forward to the output
1846 This method returns an iterator of objects to forward to the output
1846 layer. The easiest implementation is a generator that just
1847 layer. The easiest implementation is a generator that just
1847 ``yield obj``.
1848 ``yield obj``.
1848 """
1849 """
1849
1850
1850 def onfinished():
1851 def onfinished():
1851 """Called after all objects have been emitted from the command function.
1852 """Called after all objects have been emitted from the command function.
1852
1853
1853 Implementations should return an iterator of objects to forward to
1854 Implementations should return an iterator of objects to forward to
1854 the output layer.
1855 the output layer.
1855
1856
1856 This method can be a generator.
1857 This method can be a generator.
1857 """
1858 """
@@ -1,484 +1,487
1 # storageutil.py - Storage functionality agnostic of backend implementation.
1 # storageutil.py - Storage functionality agnostic of backend implementation.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11 import re
11 import re
12 import struct
12 import struct
13
13
14 from ..i18n import _
14 from ..i18n import _
15 from ..node import (
15 from ..node import (
16 bin,
16 bin,
17 nullid,
17 nullid,
18 nullrev,
18 nullrev,
19 )
19 )
20 from .. import (
20 from .. import (
21 dagop,
21 dagop,
22 error,
22 error,
23 mdiff,
23 mdiff,
24 pycompat,
24 pycompat,
25 repository,
25 repository,
26 )
26 )
27
27
28 _nullhash = hashlib.sha1(nullid)
28 _nullhash = hashlib.sha1(nullid)
29
29
30 def hashrevisionsha1(text, p1, p2):
30 def hashrevisionsha1(text, p1, p2):
31 """Compute the SHA-1 for revision data and its parents.
31 """Compute the SHA-1 for revision data and its parents.
32
32
33 This hash combines both the current file contents and its history
33 This hash combines both the current file contents and its history
34 in a manner that makes it easy to distinguish nodes with the same
34 in a manner that makes it easy to distinguish nodes with the same
35 content in the revision graph.
35 content in the revision graph.
36 """
36 """
37 # As of now, if one of the parent node is null, p2 is null
37 # As of now, if one of the parent node is null, p2 is null
38 if p2 == nullid:
38 if p2 == nullid:
39 # deep copy of a hash is faster than creating one
39 # deep copy of a hash is faster than creating one
40 s = _nullhash.copy()
40 s = _nullhash.copy()
41 s.update(p1)
41 s.update(p1)
42 else:
42 else:
43 # none of the parent nodes are nullid
43 # none of the parent nodes are nullid
44 if p1 < p2:
44 if p1 < p2:
45 a = p1
45 a = p1
46 b = p2
46 b = p2
47 else:
47 else:
48 a = p2
48 a = p2
49 b = p1
49 b = p1
50 s = hashlib.sha1(a)
50 s = hashlib.sha1(a)
51 s.update(b)
51 s.update(b)
52 s.update(text)
52 s.update(text)
53 return s.digest()
53 return s.digest()
54
54
55 METADATA_RE = re.compile(b'\x01\n')
55 METADATA_RE = re.compile(b'\x01\n')
56
56
57 def parsemeta(text):
57 def parsemeta(text):
58 """Parse metadata header from revision data.
58 """Parse metadata header from revision data.
59
59
60 Returns a 2-tuple of (metadata, offset), where both can be None if there
60 Returns a 2-tuple of (metadata, offset), where both can be None if there
61 is no metadata.
61 is no metadata.
62 """
62 """
63 # text can be buffer, so we can't use .startswith or .index
63 # text can be buffer, so we can't use .startswith or .index
64 if text[:2] != b'\x01\n':
64 if text[:2] != b'\x01\n':
65 return None, None
65 return None, None
66 s = METADATA_RE.search(text, 2).start()
66 s = METADATA_RE.search(text, 2).start()
67 mtext = text[2:s]
67 mtext = text[2:s]
68 meta = {}
68 meta = {}
69 for l in mtext.splitlines():
69 for l in mtext.splitlines():
70 k, v = l.split(b': ', 1)
70 k, v = l.split(b': ', 1)
71 meta[k] = v
71 meta[k] = v
72 return meta, s + 2
72 return meta, s + 2
73
73
74 def packmeta(meta, text):
74 def packmeta(meta, text):
75 """Add metadata to fulltext to produce revision text."""
75 """Add metadata to fulltext to produce revision text."""
76 keys = sorted(meta)
76 keys = sorted(meta)
77 metatext = b''.join(b'%s: %s\n' % (k, meta[k]) for k in keys)
77 metatext = b''.join(b'%s: %s\n' % (k, meta[k]) for k in keys)
78 return b'\x01\n%s\x01\n%s' % (metatext, text)
78 return b'\x01\n%s\x01\n%s' % (metatext, text)
79
79
80 def iscensoredtext(text):
80 def iscensoredtext(text):
81 meta = parsemeta(text)[0]
81 meta = parsemeta(text)[0]
82 return meta and b'censored' in meta
82 return meta and b'censored' in meta
83
83
84 def filtermetadata(text):
84 def filtermetadata(text):
85 """Extract just the revision data from source text.
85 """Extract just the revision data from source text.
86
86
87 Returns ``text`` unless it has a metadata header, in which case we return
87 Returns ``text`` unless it has a metadata header, in which case we return
88 a new buffer without hte metadata.
88 a new buffer without hte metadata.
89 """
89 """
90 if not text.startswith(b'\x01\n'):
90 if not text.startswith(b'\x01\n'):
91 return text
91 return text
92
92
93 offset = text.index(b'\x01\n', 2)
93 offset = text.index(b'\x01\n', 2)
94 return text[offset + 2:]
94 return text[offset + 2:]
95
95
96 def filerevisioncopied(store, node):
96 def filerevisioncopied(store, node):
97 """Resolve file revision copy metadata.
97 """Resolve file revision copy metadata.
98
98
99 Returns ``False`` if the file has no copy metadata. Otherwise a
99 Returns ``False`` if the file has no copy metadata. Otherwise a
100 2-tuple of the source filename and node.
100 2-tuple of the source filename and node.
101 """
101 """
102 if store.parents(node)[0] != nullid:
102 if store.parents(node)[0] != nullid:
103 return False
103 return False
104
104
105 meta = parsemeta(store.revision(node))[0]
105 meta = parsemeta(store.revision(node))[0]
106
106
107 # copy and copyrev occur in pairs. In rare cases due to old bugs,
107 # copy and copyrev occur in pairs. In rare cases due to old bugs,
108 # one can occur without the other. So ensure both are present to flag
108 # one can occur without the other. So ensure both are present to flag
109 # as a copy.
109 # as a copy.
110 if meta and b'copy' in meta and b'copyrev' in meta:
110 if meta and b'copy' in meta and b'copyrev' in meta:
111 return meta[b'copy'], bin(meta[b'copyrev'])
111 return meta[b'copy'], bin(meta[b'copyrev'])
112
112
113 return False
113 return False
114
114
115 def filedataequivalent(store, node, filedata):
115 def filedataequivalent(store, node, filedata):
116 """Determines whether file data is equivalent to a stored node.
116 """Determines whether file data is equivalent to a stored node.
117
117
118 Returns True if the passed file data would hash to the same value
118 Returns True if the passed file data would hash to the same value
119 as a stored revision and False otherwise.
119 as a stored revision and False otherwise.
120
120
121 When a stored revision is censored, filedata must be empty to have
121 When a stored revision is censored, filedata must be empty to have
122 equivalence.
122 equivalence.
123
123
124 When a stored revision has copy metadata, it is ignored as part
124 When a stored revision has copy metadata, it is ignored as part
125 of the compare.
125 of the compare.
126 """
126 """
127
127
128 if filedata.startswith(b'\x01\n'):
128 if filedata.startswith(b'\x01\n'):
129 revisiontext = b'\x01\n\x01\n' + filedata
129 revisiontext = b'\x01\n\x01\n' + filedata
130 else:
130 else:
131 revisiontext = filedata
131 revisiontext = filedata
132
132
133 p1, p2 = store.parents(node)
133 p1, p2 = store.parents(node)
134
134
135 computednode = hashrevisionsha1(revisiontext, p1, p2)
135 computednode = hashrevisionsha1(revisiontext, p1, p2)
136
136
137 if computednode == node:
137 if computednode == node:
138 return True
138 return True
139
139
140 # Censored files compare against the empty file.
140 # Censored files compare against the empty file.
141 if store.iscensored(store.rev(node)):
141 if store.iscensored(store.rev(node)):
142 return filedata == b''
142 return filedata == b''
143
143
144 # Renaming a file produces a different hash, even if the data
144 # Renaming a file produces a different hash, even if the data
145 # remains unchanged. Check if that's the case.
145 # remains unchanged. Check if that's the case.
146 if store.renamed(node):
146 if store.renamed(node):
147 return store.read(node) == filedata
147 return store.read(node) == filedata
148
148
149 return False
149 return False
150
150
151 def iterrevs(storelen, start=0, stop=None):
151 def iterrevs(storelen, start=0, stop=None):
152 """Iterate over revision numbers in a store."""
152 """Iterate over revision numbers in a store."""
153 step = 1
153 step = 1
154
154
155 if stop is not None:
155 if stop is not None:
156 if start > stop:
156 if start > stop:
157 step = -1
157 step = -1
158 stop += step
158 stop += step
159 if stop > storelen:
159 if stop > storelen:
160 stop = storelen
160 stop = storelen
161 else:
161 else:
162 stop = storelen
162 stop = storelen
163
163
164 return pycompat.xrange(start, stop, step)
164 return pycompat.xrange(start, stop, step)
165
165
166 def fileidlookup(store, fileid, identifier):
166 def fileidlookup(store, fileid, identifier):
167 """Resolve the file node for a value.
167 """Resolve the file node for a value.
168
168
169 ``store`` is an object implementing the ``ifileindex`` interface.
169 ``store`` is an object implementing the ``ifileindex`` interface.
170
170
171 ``fileid`` can be:
171 ``fileid`` can be:
172
172
173 * A 20 byte binary node.
173 * A 20 byte binary node.
174 * An integer revision number
174 * An integer revision number
175 * A 40 byte hex node.
175 * A 40 byte hex node.
176 * A bytes that can be parsed as an integer representing a revision number.
176 * A bytes that can be parsed as an integer representing a revision number.
177
177
178 ``identifier`` is used to populate ``error.LookupError`` with an identifier
178 ``identifier`` is used to populate ``error.LookupError`` with an identifier
179 for the store.
179 for the store.
180
180
181 Raises ``error.LookupError`` on failure.
181 Raises ``error.LookupError`` on failure.
182 """
182 """
183 if isinstance(fileid, int):
183 if isinstance(fileid, int):
184 try:
184 try:
185 return store.node(fileid)
185 return store.node(fileid)
186 except IndexError:
186 except IndexError:
187 raise error.LookupError('%d' % fileid, identifier,
187 raise error.LookupError('%d' % fileid, identifier,
188 _('no match found'))
188 _('no match found'))
189
189
190 if len(fileid) == 20:
190 if len(fileid) == 20:
191 try:
191 try:
192 store.rev(fileid)
192 store.rev(fileid)
193 return fileid
193 return fileid
194 except error.LookupError:
194 except error.LookupError:
195 pass
195 pass
196
196
197 if len(fileid) == 40:
197 if len(fileid) == 40:
198 try:
198 try:
199 rawnode = bin(fileid)
199 rawnode = bin(fileid)
200 store.rev(rawnode)
200 store.rev(rawnode)
201 return rawnode
201 return rawnode
202 except TypeError:
202 except TypeError:
203 pass
203 pass
204
204
205 try:
205 try:
206 rev = int(fileid)
206 rev = int(fileid)
207
207
208 if b'%d' % rev != fileid:
208 if b'%d' % rev != fileid:
209 raise ValueError
209 raise ValueError
210
210
211 try:
211 try:
212 return store.node(rev)
212 return store.node(rev)
213 except (IndexError, TypeError):
213 except (IndexError, TypeError):
214 pass
214 pass
215 except (ValueError, OverflowError):
215 except (ValueError, OverflowError):
216 pass
216 pass
217
217
218 raise error.LookupError(fileid, identifier, _('no match found'))
218 raise error.LookupError(fileid, identifier, _('no match found'))
219
219
220 def resolvestripinfo(minlinkrev, tiprev, headrevs, linkrevfn, parentrevsfn):
220 def resolvestripinfo(minlinkrev, tiprev, headrevs, linkrevfn, parentrevsfn):
221 """Resolve information needed to strip revisions.
221 """Resolve information needed to strip revisions.
222
222
223 Finds the minimum revision number that must be stripped in order to
223 Finds the minimum revision number that must be stripped in order to
224 strip ``minlinkrev``.
224 strip ``minlinkrev``.
225
225
226 Returns a 2-tuple of the minimum revision number to do that and a set
226 Returns a 2-tuple of the minimum revision number to do that and a set
227 of all revision numbers that have linkrevs that would be broken
227 of all revision numbers that have linkrevs that would be broken
228 by that strip.
228 by that strip.
229
229
230 ``tiprev`` is the current tip-most revision. It is ``len(store) - 1``.
230 ``tiprev`` is the current tip-most revision. It is ``len(store) - 1``.
231 ``headrevs`` is an iterable of head revisions.
231 ``headrevs`` is an iterable of head revisions.
232 ``linkrevfn`` is a callable that receives a revision and returns a linked
232 ``linkrevfn`` is a callable that receives a revision and returns a linked
233 revision.
233 revision.
234 ``parentrevsfn`` is a callable that receives a revision number and returns
234 ``parentrevsfn`` is a callable that receives a revision number and returns
235 an iterable of its parent revision numbers.
235 an iterable of its parent revision numbers.
236 """
236 """
237 brokenrevs = set()
237 brokenrevs = set()
238 strippoint = tiprev + 1
238 strippoint = tiprev + 1
239
239
240 heads = {}
240 heads = {}
241 futurelargelinkrevs = set()
241 futurelargelinkrevs = set()
242 for head in headrevs:
242 for head in headrevs:
243 headlinkrev = linkrevfn(head)
243 headlinkrev = linkrevfn(head)
244 heads[head] = headlinkrev
244 heads[head] = headlinkrev
245 if headlinkrev >= minlinkrev:
245 if headlinkrev >= minlinkrev:
246 futurelargelinkrevs.add(headlinkrev)
246 futurelargelinkrevs.add(headlinkrev)
247
247
248 # This algorithm involves walking down the rev graph, starting at the
248 # This algorithm involves walking down the rev graph, starting at the
249 # heads. Since the revs are topologically sorted according to linkrev,
249 # heads. Since the revs are topologically sorted according to linkrev,
250 # once all head linkrevs are below the minlink, we know there are
250 # once all head linkrevs are below the minlink, we know there are
251 # no more revs that could have a linkrev greater than minlink.
251 # no more revs that could have a linkrev greater than minlink.
252 # So we can stop walking.
252 # So we can stop walking.
253 while futurelargelinkrevs:
253 while futurelargelinkrevs:
254 strippoint -= 1
254 strippoint -= 1
255 linkrev = heads.pop(strippoint)
255 linkrev = heads.pop(strippoint)
256
256
257 if linkrev < minlinkrev:
257 if linkrev < minlinkrev:
258 brokenrevs.add(strippoint)
258 brokenrevs.add(strippoint)
259 else:
259 else:
260 futurelargelinkrevs.remove(linkrev)
260 futurelargelinkrevs.remove(linkrev)
261
261
262 for p in parentrevsfn(strippoint):
262 for p in parentrevsfn(strippoint):
263 if p != nullrev:
263 if p != nullrev:
264 plinkrev = linkrevfn(p)
264 plinkrev = linkrevfn(p)
265 heads[p] = plinkrev
265 heads[p] = plinkrev
266 if plinkrev >= minlinkrev:
266 if plinkrev >= minlinkrev:
267 futurelargelinkrevs.add(plinkrev)
267 futurelargelinkrevs.add(plinkrev)
268
268
269 return strippoint, brokenrevs
269 return strippoint, brokenrevs
270
270
271 def emitrevisions(store, nodes, nodesorder, resultcls, deltaparentfn=None,
271 def emitrevisions(store, nodes, nodesorder, resultcls, deltaparentfn=None,
272 candeltafn=None, rawsizefn=None, revdifffn=None, flagsfn=None,
272 candeltafn=None, rawsizefn=None, revdifffn=None, flagsfn=None,
273 deltamode=repository.CG_DELTAMODE_STD,
273 deltamode=repository.CG_DELTAMODE_STD,
274 revisiondata=False, assumehaveparentrevisions=False):
274 revisiondata=False, assumehaveparentrevisions=False):
275 """Generic implementation of ifiledata.emitrevisions().
275 """Generic implementation of ifiledata.emitrevisions().
276
276
277 Emitting revision data is subtly complex. This function attempts to
277 Emitting revision data is subtly complex. This function attempts to
278 encapsulate all the logic for doing so in a backend-agnostic way.
278 encapsulate all the logic for doing so in a backend-agnostic way.
279
279
280 ``store``
280 ``store``
281 Object conforming to ``ifilestorage`` interface.
281 Object conforming to ``ifilestorage`` interface.
282
282
283 ``nodes``
283 ``nodes``
284 List of revision nodes whose data to emit.
284 List of revision nodes whose data to emit.
285
285
286 ``resultcls``
286 ``resultcls``
287 A type implementing the ``irevisiondelta`` interface that will be
287 A type implementing the ``irevisiondelta`` interface that will be
288 constructed and returned.
288 constructed and returned.
289
289
290 ``deltaparentfn`` (optional)
290 ``deltaparentfn`` (optional)
291 Callable receiving a revision number and returning the revision number
291 Callable receiving a revision number and returning the revision number
292 of a revision that the internal delta is stored against. This delta
292 of a revision that the internal delta is stored against. This delta
293 will be preferred over computing a new arbitrary delta.
293 will be preferred over computing a new arbitrary delta.
294
294
295 If not defined, a delta will always be computed from raw revision
295 If not defined, a delta will always be computed from raw revision
296 data.
296 data.
297
297
298 ``candeltafn`` (optional)
298 ``candeltafn`` (optional)
299 Callable receiving a pair of revision numbers that returns a bool
299 Callable receiving a pair of revision numbers that returns a bool
300 indicating whether a delta between them can be produced.
300 indicating whether a delta between them can be produced.
301
301
302 If not defined, it is assumed that any two revisions can delta with
302 If not defined, it is assumed that any two revisions can delta with
303 each other.
303 each other.
304
304
305 ``rawsizefn`` (optional)
305 ``rawsizefn`` (optional)
306 Callable receiving a revision number and returning the length of the
306 Callable receiving a revision number and returning the length of the
307 ``store.revision(rev, raw=True)``.
307 ``store.revision(rev, raw=True)``.
308
308
309 If not defined, ``len(store.revision(rev, raw=True))`` will be called.
309 If not defined, ``len(store.revision(rev, raw=True))`` will be called.
310
310
311 ``revdifffn`` (optional)
311 ``revdifffn`` (optional)
312 Callable receiving a pair of revision numbers that returns a delta
312 Callable receiving a pair of revision numbers that returns a delta
313 between them.
313 between them.
314
314
315 If not defined, a delta will be computed by invoking mdiff code
315 If not defined, a delta will be computed by invoking mdiff code
316 on ``store.revision()`` results.
316 on ``store.revision()`` results.
317
317
318 Defining this function allows a precomputed or stored delta to be
318 Defining this function allows a precomputed or stored delta to be
319 used without having to compute on.
319 used without having to compute on.
320
320
321 ``flagsfn`` (optional)
321 ``flagsfn`` (optional)
322 Callable receiving a revision number and returns the integer flags
322 Callable receiving a revision number and returns the integer flags
323 value for it. If not defined, flags value will be 0.
323 value for it. If not defined, flags value will be 0.
324
324
325 ``deltamode``
325 ``deltamode``
326 constaint on delta to be sent:
326 constaint on delta to be sent:
327 * CG_DELTAMODE_STD - normal mode, try to reuse storage deltas,
327 * CG_DELTAMODE_STD - normal mode, try to reuse storage deltas,
328 * CG_DELTAMODE_PREV - only delta against "prev",
328 * CG_DELTAMODE_PREV - only delta against "prev",
329 * CG_DELTAMODE_FULL - only issue full snapshot.
329 * CG_DELTAMODE_FULL - only issue full snapshot.
330
330
331 Whether to send fulltext revisions instead of deltas, if allowed.
331 Whether to send fulltext revisions instead of deltas, if allowed.
332
332
333 ``nodesorder``
333 ``nodesorder``
334 ``revisiondata``
334 ``revisiondata``
335 ``assumehaveparentrevisions``
335 ``assumehaveparentrevisions``
336 """
336 """
337
337
338 fnode = store.node
338 fnode = store.node
339 frev = store.rev
339 frev = store.rev
340
340
341 if nodesorder == 'nodes':
341 if nodesorder == 'nodes':
342 revs = [frev(n) for n in nodes]
342 revs = [frev(n) for n in nodes]
343 elif nodesorder == 'storage':
343 elif nodesorder == 'storage':
344 revs = sorted(frev(n) for n in nodes)
344 revs = sorted(frev(n) for n in nodes)
345 else:
345 else:
346 revs = set(frev(n) for n in nodes)
346 revs = set(frev(n) for n in nodes)
347 revs = dagop.linearize(revs, store.parentrevs)
347 revs = dagop.linearize(revs, store.parentrevs)
348
348
349 prevrev = None
349 prevrev = None
350
350
351 if deltamode == repository.CG_DELTAMODE_PREV or assumehaveparentrevisions:
351 if deltamode == repository.CG_DELTAMODE_PREV or assumehaveparentrevisions:
352 prevrev = store.parentrevs(revs[0])[0]
352 prevrev = store.parentrevs(revs[0])[0]
353
353
354 # Set of revs available to delta against.
354 # Set of revs available to delta against.
355 available = set()
355 available = set()
356
356
357 for rev in revs:
357 for rev in revs:
358 if rev == nullrev:
358 if rev == nullrev:
359 continue
359 continue
360
360
361 node = fnode(rev)
361 node = fnode(rev)
362 p1rev, p2rev = store.parentrevs(rev)
362 p1rev, p2rev = store.parentrevs(rev)
363
363
364 if deltaparentfn:
364 if deltaparentfn:
365 deltaparentrev = deltaparentfn(rev)
365 deltaparentrev = deltaparentfn(rev)
366 else:
366 else:
367 deltaparentrev = nullrev
367 deltaparentrev = nullrev
368
368
369 # Forced delta against previous mode.
369 # Forced delta against previous mode.
370 if deltamode == repository.CG_DELTAMODE_PREV:
370 if deltamode == repository.CG_DELTAMODE_PREV:
371 baserev = prevrev
371 baserev = prevrev
372
372
373 # We're instructed to send fulltext. Honor that.
373 # We're instructed to send fulltext. Honor that.
374 elif deltamode == repository.CG_DELTAMODE_FULL:
374 elif deltamode == repository.CG_DELTAMODE_FULL:
375 baserev = nullrev
375 baserev = nullrev
376 # We're instructed to use p1. Honor that
377 elif deltamode == repository.CG_DELTAMODE_P1:
378 baserev = p1rev
376
379
377 # There is a delta in storage. We try to use that because it
380 # There is a delta in storage. We try to use that because it
378 # amounts to effectively copying data from storage and is
381 # amounts to effectively copying data from storage and is
379 # therefore the fastest.
382 # therefore the fastest.
380 elif deltaparentrev != nullrev:
383 elif deltaparentrev != nullrev:
381 # Base revision was already emitted in this group. We can
384 # Base revision was already emitted in this group. We can
382 # always safely use the delta.
385 # always safely use the delta.
383 if deltaparentrev in available:
386 if deltaparentrev in available:
384 baserev = deltaparentrev
387 baserev = deltaparentrev
385
388
386 # Base revision is a parent that hasn't been emitted already.
389 # Base revision is a parent that hasn't been emitted already.
387 # Use it if we can assume the receiver has the parent revision.
390 # Use it if we can assume the receiver has the parent revision.
388 elif (assumehaveparentrevisions
391 elif (assumehaveparentrevisions
389 and deltaparentrev in (p1rev, p2rev)):
392 and deltaparentrev in (p1rev, p2rev)):
390 baserev = deltaparentrev
393 baserev = deltaparentrev
391
394
392 # No guarantee the receiver has the delta parent. Send delta
395 # No guarantee the receiver has the delta parent. Send delta
393 # against last revision (if possible), which in the common case
396 # against last revision (if possible), which in the common case
394 # should be similar enough to this revision that the delta is
397 # should be similar enough to this revision that the delta is
395 # reasonable.
398 # reasonable.
396 elif prevrev is not None:
399 elif prevrev is not None:
397 baserev = prevrev
400 baserev = prevrev
398 else:
401 else:
399 baserev = nullrev
402 baserev = nullrev
400
403
401 # Storage has a fulltext revision.
404 # Storage has a fulltext revision.
402
405
403 # Let's use the previous revision, which is as good a guess as any.
406 # Let's use the previous revision, which is as good a guess as any.
404 # There is definitely room to improve this logic.
407 # There is definitely room to improve this logic.
405 elif prevrev is not None:
408 elif prevrev is not None:
406 baserev = prevrev
409 baserev = prevrev
407 else:
410 else:
408 baserev = nullrev
411 baserev = nullrev
409
412
410 # But we can't actually use our chosen delta base for whatever
413 # But we can't actually use our chosen delta base for whatever
411 # reason. Reset to fulltext.
414 # reason. Reset to fulltext.
412 if baserev != nullrev and (candeltafn and not candeltafn(baserev, rev)):
415 if baserev != nullrev and (candeltafn and not candeltafn(baserev, rev)):
413 baserev = nullrev
416 baserev = nullrev
414
417
415 revision = None
418 revision = None
416 delta = None
419 delta = None
417 baserevisionsize = None
420 baserevisionsize = None
418
421
419 if revisiondata:
422 if revisiondata:
420 if store.iscensored(baserev) or store.iscensored(rev):
423 if store.iscensored(baserev) or store.iscensored(rev):
421 try:
424 try:
422 revision = store.revision(node, raw=True)
425 revision = store.revision(node, raw=True)
423 except error.CensoredNodeError as e:
426 except error.CensoredNodeError as e:
424 revision = e.tombstone
427 revision = e.tombstone
425
428
426 if baserev != nullrev:
429 if baserev != nullrev:
427 if rawsizefn:
430 if rawsizefn:
428 baserevisionsize = rawsizefn(baserev)
431 baserevisionsize = rawsizefn(baserev)
429 else:
432 else:
430 baserevisionsize = len(store.revision(baserev,
433 baserevisionsize = len(store.revision(baserev,
431 raw=True))
434 raw=True))
432
435
433 elif (baserev == nullrev
436 elif (baserev == nullrev
434 and deltamode != repository.CG_DELTAMODE_PREV):
437 and deltamode != repository.CG_DELTAMODE_PREV):
435 revision = store.revision(node, raw=True)
438 revision = store.revision(node, raw=True)
436 available.add(rev)
439 available.add(rev)
437 else:
440 else:
438 if revdifffn:
441 if revdifffn:
439 delta = revdifffn(baserev, rev)
442 delta = revdifffn(baserev, rev)
440 else:
443 else:
441 delta = mdiff.textdiff(store.revision(baserev, raw=True),
444 delta = mdiff.textdiff(store.revision(baserev, raw=True),
442 store.revision(rev, raw=True))
445 store.revision(rev, raw=True))
443
446
444 available.add(rev)
447 available.add(rev)
445
448
446 yield resultcls(
449 yield resultcls(
447 node=node,
450 node=node,
448 p1node=fnode(p1rev),
451 p1node=fnode(p1rev),
449 p2node=fnode(p2rev),
452 p2node=fnode(p2rev),
450 basenode=fnode(baserev),
453 basenode=fnode(baserev),
451 flags=flagsfn(rev) if flagsfn else 0,
454 flags=flagsfn(rev) if flagsfn else 0,
452 baserevisionsize=baserevisionsize,
455 baserevisionsize=baserevisionsize,
453 revision=revision,
456 revision=revision,
454 delta=delta)
457 delta=delta)
455
458
456 prevrev = rev
459 prevrev = rev
457
460
458 def deltaiscensored(delta, baserev, baselenfn):
461 def deltaiscensored(delta, baserev, baselenfn):
459 """Determine if a delta represents censored revision data.
462 """Determine if a delta represents censored revision data.
460
463
461 ``baserev`` is the base revision this delta is encoded against.
464 ``baserev`` is the base revision this delta is encoded against.
462 ``baselenfn`` is a callable receiving a revision number that resolves the
465 ``baselenfn`` is a callable receiving a revision number that resolves the
463 length of the revision fulltext.
466 length of the revision fulltext.
464
467
465 Returns a bool indicating if the result of the delta represents a censored
468 Returns a bool indicating if the result of the delta represents a censored
466 revision.
469 revision.
467 """
470 """
468 # Fragile heuristic: unless new file meta keys are added alphabetically
471 # Fragile heuristic: unless new file meta keys are added alphabetically
469 # preceding "censored", all censored revisions are prefixed by
472 # preceding "censored", all censored revisions are prefixed by
470 # "\1\ncensored:". A delta producing such a censored revision must be a
473 # "\1\ncensored:". A delta producing such a censored revision must be a
471 # full-replacement delta, so we inspect the first and only patch in the
474 # full-replacement delta, so we inspect the first and only patch in the
472 # delta for this prefix.
475 # delta for this prefix.
473 hlen = struct.calcsize(">lll")
476 hlen = struct.calcsize(">lll")
474 if len(delta) <= hlen:
477 if len(delta) <= hlen:
475 return False
478 return False
476
479
477 oldlen = baselenfn(baserev)
480 oldlen = baselenfn(baserev)
478 newlen = len(delta) - hlen
481 newlen = len(delta) - hlen
479 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
482 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
480 return False
483 return False
481
484
482 add = "\1\ncensored:"
485 add = "\1\ncensored:"
483 addlen = len(add)
486 addlen = len(add)
484 return newlen >= addlen and delta[hlen:hlen + addlen] == add
487 return newlen >= addlen and delta[hlen:hlen + addlen] == add
@@ -1,902 +1,907
1 Setting up test
1 Setting up test
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo 0 > afile
5 $ echo 0 > afile
6 $ hg add afile
6 $ hg add afile
7 $ hg commit -m "0.0"
7 $ hg commit -m "0.0"
8 $ echo 1 >> afile
8 $ echo 1 >> afile
9 $ hg commit -m "0.1"
9 $ hg commit -m "0.1"
10 $ echo 2 >> afile
10 $ echo 2 >> afile
11 $ hg commit -m "0.2"
11 $ hg commit -m "0.2"
12 $ echo 3 >> afile
12 $ echo 3 >> afile
13 $ hg commit -m "0.3"
13 $ hg commit -m "0.3"
14 $ hg update -C 0
14 $ hg update -C 0
15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
15 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
16 $ echo 1 >> afile
16 $ echo 1 >> afile
17 $ hg commit -m "1.1"
17 $ hg commit -m "1.1"
18 created new head
18 created new head
19 $ echo 2 >> afile
19 $ echo 2 >> afile
20 $ hg commit -m "1.2"
20 $ hg commit -m "1.2"
21 $ echo "a line" > fred
21 $ echo "a line" > fred
22 $ echo 3 >> afile
22 $ echo 3 >> afile
23 $ hg add fred
23 $ hg add fred
24 $ hg commit -m "1.3"
24 $ hg commit -m "1.3"
25 $ hg mv afile adifferentfile
25 $ hg mv afile adifferentfile
26 $ hg commit -m "1.3m"
26 $ hg commit -m "1.3m"
27 $ hg update -C 3
27 $ hg update -C 3
28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
28 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
29 $ hg mv afile anotherfile
29 $ hg mv afile anotherfile
30 $ hg commit -m "0.3m"
30 $ hg commit -m "0.3m"
31 $ hg verify
31 $ hg verify
32 checking changesets
32 checking changesets
33 checking manifests
33 checking manifests
34 crosschecking files in changesets and manifests
34 crosschecking files in changesets and manifests
35 checking files
35 checking files
36 checked 9 changesets with 7 changes to 4 files
36 checked 9 changesets with 7 changes to 4 files
37 $ cd ..
37 $ cd ..
38 $ hg init empty
38 $ hg init empty
39
39
40 Bundle and phase
40 Bundle and phase
41
41
42 $ hg -R test phase --force --secret 0
42 $ hg -R test phase --force --secret 0
43 $ hg -R test bundle phase.hg empty
43 $ hg -R test bundle phase.hg empty
44 searching for changes
44 searching for changes
45 no changes found (ignored 9 secret changesets)
45 no changes found (ignored 9 secret changesets)
46 [1]
46 [1]
47 $ hg -R test phase --draft -r 'head()'
47 $ hg -R test phase --draft -r 'head()'
48
48
49 Bundle --all
49 Bundle --all
50
50
51 $ hg -R test bundle --all all.hg
51 $ hg -R test bundle --all all.hg
52 9 changesets found
52 9 changesets found
53
53
54 Bundle test to full.hg
54 Bundle test to full.hg
55
55
56 $ hg -R test bundle full.hg empty
56 $ hg -R test bundle full.hg empty
57 searching for changes
57 searching for changes
58 9 changesets found
58 9 changesets found
59
59
60 Unbundle full.hg in test
60 Unbundle full.hg in test
61
61
62 $ hg -R test unbundle full.hg
62 $ hg -R test unbundle full.hg
63 adding changesets
63 adding changesets
64 adding manifests
64 adding manifests
65 adding file changes
65 adding file changes
66 added 0 changesets with 0 changes to 4 files
66 added 0 changesets with 0 changes to 4 files
67 (run 'hg update' to get a working copy)
67 (run 'hg update' to get a working copy)
68
68
69 Verify empty
69 Verify empty
70
70
71 $ hg -R empty heads
71 $ hg -R empty heads
72 [1]
72 [1]
73 $ hg -R empty verify
73 $ hg -R empty verify
74 checking changesets
74 checking changesets
75 checking manifests
75 checking manifests
76 crosschecking files in changesets and manifests
76 crosschecking files in changesets and manifests
77 checking files
77 checking files
78 checked 0 changesets with 0 changes to 0 files
78 checked 0 changesets with 0 changes to 0 files
79
79
80 #if repobundlerepo
80 #if repobundlerepo
81
81
82 Pull full.hg into test (using --cwd)
82 Pull full.hg into test (using --cwd)
83
83
84 $ hg --cwd test pull ../full.hg
84 $ hg --cwd test pull ../full.hg
85 pulling from ../full.hg
85 pulling from ../full.hg
86 searching for changes
86 searching for changes
87 no changes found
87 no changes found
88
88
89 Verify that there are no leaked temporary files after pull (issue2797)
89 Verify that there are no leaked temporary files after pull (issue2797)
90
90
91 $ ls test/.hg | grep .hg10un
91 $ ls test/.hg | grep .hg10un
92 [1]
92 [1]
93
93
94 Pull full.hg into empty (using --cwd)
94 Pull full.hg into empty (using --cwd)
95
95
96 $ hg --cwd empty pull ../full.hg
96 $ hg --cwd empty pull ../full.hg
97 pulling from ../full.hg
97 pulling from ../full.hg
98 requesting all changes
98 requesting all changes
99 adding changesets
99 adding changesets
100 adding manifests
100 adding manifests
101 adding file changes
101 adding file changes
102 added 9 changesets with 7 changes to 4 files (+1 heads)
102 added 9 changesets with 7 changes to 4 files (+1 heads)
103 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
103 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
104 (run 'hg heads' to see heads, 'hg merge' to merge)
104 (run 'hg heads' to see heads, 'hg merge' to merge)
105
105
106 Rollback empty
106 Rollback empty
107
107
108 $ hg -R empty rollback
108 $ hg -R empty rollback
109 repository tip rolled back to revision -1 (undo pull)
109 repository tip rolled back to revision -1 (undo pull)
110
110
111 Pull full.hg into empty again (using --cwd)
111 Pull full.hg into empty again (using --cwd)
112
112
113 $ hg --cwd empty pull ../full.hg
113 $ hg --cwd empty pull ../full.hg
114 pulling from ../full.hg
114 pulling from ../full.hg
115 requesting all changes
115 requesting all changes
116 adding changesets
116 adding changesets
117 adding manifests
117 adding manifests
118 adding file changes
118 adding file changes
119 added 9 changesets with 7 changes to 4 files (+1 heads)
119 added 9 changesets with 7 changes to 4 files (+1 heads)
120 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
120 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
121 (run 'hg heads' to see heads, 'hg merge' to merge)
121 (run 'hg heads' to see heads, 'hg merge' to merge)
122
122
123 Pull full.hg into test (using -R)
123 Pull full.hg into test (using -R)
124
124
125 $ hg -R test pull full.hg
125 $ hg -R test pull full.hg
126 pulling from full.hg
126 pulling from full.hg
127 searching for changes
127 searching for changes
128 no changes found
128 no changes found
129
129
130 Pull full.hg into empty (using -R)
130 Pull full.hg into empty (using -R)
131
131
132 $ hg -R empty pull full.hg
132 $ hg -R empty pull full.hg
133 pulling from full.hg
133 pulling from full.hg
134 searching for changes
134 searching for changes
135 no changes found
135 no changes found
136
136
137 Rollback empty
137 Rollback empty
138
138
139 $ hg -R empty rollback
139 $ hg -R empty rollback
140 repository tip rolled back to revision -1 (undo pull)
140 repository tip rolled back to revision -1 (undo pull)
141
141
142 Pull full.hg into empty again (using -R)
142 Pull full.hg into empty again (using -R)
143
143
144 $ hg -R empty pull full.hg
144 $ hg -R empty pull full.hg
145 pulling from full.hg
145 pulling from full.hg
146 requesting all changes
146 requesting all changes
147 adding changesets
147 adding changesets
148 adding manifests
148 adding manifests
149 adding file changes
149 adding file changes
150 added 9 changesets with 7 changes to 4 files (+1 heads)
150 added 9 changesets with 7 changes to 4 files (+1 heads)
151 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
151 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
152 (run 'hg heads' to see heads, 'hg merge' to merge)
152 (run 'hg heads' to see heads, 'hg merge' to merge)
153
153
154 Log -R full.hg in fresh empty
154 Log -R full.hg in fresh empty
155
155
156 $ rm -r empty
156 $ rm -r empty
157 $ hg init empty
157 $ hg init empty
158 $ cd empty
158 $ cd empty
159 $ hg -R bundle://../full.hg log
159 $ hg -R bundle://../full.hg log
160 changeset: 8:aa35859c02ea
160 changeset: 8:aa35859c02ea
161 tag: tip
161 tag: tip
162 parent: 3:eebf5a27f8ca
162 parent: 3:eebf5a27f8ca
163 user: test
163 user: test
164 date: Thu Jan 01 00:00:00 1970 +0000
164 date: Thu Jan 01 00:00:00 1970 +0000
165 summary: 0.3m
165 summary: 0.3m
166
166
167 changeset: 7:a6a34bfa0076
167 changeset: 7:a6a34bfa0076
168 user: test
168 user: test
169 date: Thu Jan 01 00:00:00 1970 +0000
169 date: Thu Jan 01 00:00:00 1970 +0000
170 summary: 1.3m
170 summary: 1.3m
171
171
172 changeset: 6:7373c1169842
172 changeset: 6:7373c1169842
173 user: test
173 user: test
174 date: Thu Jan 01 00:00:00 1970 +0000
174 date: Thu Jan 01 00:00:00 1970 +0000
175 summary: 1.3
175 summary: 1.3
176
176
177 changeset: 5:1bb50a9436a7
177 changeset: 5:1bb50a9436a7
178 user: test
178 user: test
179 date: Thu Jan 01 00:00:00 1970 +0000
179 date: Thu Jan 01 00:00:00 1970 +0000
180 summary: 1.2
180 summary: 1.2
181
181
182 changeset: 4:095197eb4973
182 changeset: 4:095197eb4973
183 parent: 0:f9ee2f85a263
183 parent: 0:f9ee2f85a263
184 user: test
184 user: test
185 date: Thu Jan 01 00:00:00 1970 +0000
185 date: Thu Jan 01 00:00:00 1970 +0000
186 summary: 1.1
186 summary: 1.1
187
187
188 changeset: 3:eebf5a27f8ca
188 changeset: 3:eebf5a27f8ca
189 user: test
189 user: test
190 date: Thu Jan 01 00:00:00 1970 +0000
190 date: Thu Jan 01 00:00:00 1970 +0000
191 summary: 0.3
191 summary: 0.3
192
192
193 changeset: 2:e38ba6f5b7e0
193 changeset: 2:e38ba6f5b7e0
194 user: test
194 user: test
195 date: Thu Jan 01 00:00:00 1970 +0000
195 date: Thu Jan 01 00:00:00 1970 +0000
196 summary: 0.2
196 summary: 0.2
197
197
198 changeset: 1:34c2bf6b0626
198 changeset: 1:34c2bf6b0626
199 user: test
199 user: test
200 date: Thu Jan 01 00:00:00 1970 +0000
200 date: Thu Jan 01 00:00:00 1970 +0000
201 summary: 0.1
201 summary: 0.1
202
202
203 changeset: 0:f9ee2f85a263
203 changeset: 0:f9ee2f85a263
204 user: test
204 user: test
205 date: Thu Jan 01 00:00:00 1970 +0000
205 date: Thu Jan 01 00:00:00 1970 +0000
206 summary: 0.0
206 summary: 0.0
207
207
208 Make sure bundlerepo doesn't leak tempfiles (issue2491)
208 Make sure bundlerepo doesn't leak tempfiles (issue2491)
209
209
210 $ ls .hg
210 $ ls .hg
211 00changelog.i
211 00changelog.i
212 cache
212 cache
213 requires
213 requires
214 store
214 store
215
215
216 Pull ../full.hg into empty (with hook)
216 Pull ../full.hg into empty (with hook)
217
217
218 $ cat >> .hg/hgrc <<EOF
218 $ cat >> .hg/hgrc <<EOF
219 > [hooks]
219 > [hooks]
220 > changegroup = sh -c "printenv.py changegroup"
220 > changegroup = sh -c "printenv.py changegroup"
221 > EOF
221 > EOF
222
222
223 doesn't work (yet ?)
223 doesn't work (yet ?)
224
224
225 hg -R bundle://../full.hg verify
225 hg -R bundle://../full.hg verify
226
226
227 $ hg pull bundle://../full.hg
227 $ hg pull bundle://../full.hg
228 pulling from bundle:../full.hg
228 pulling from bundle:../full.hg
229 requesting all changes
229 requesting all changes
230 adding changesets
230 adding changesets
231 adding manifests
231 adding manifests
232 adding file changes
232 adding file changes
233 added 9 changesets with 7 changes to 4 files (+1 heads)
233 added 9 changesets with 7 changes to 4 files (+1 heads)
234 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
234 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
235 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=bundle*../full.hg (glob)
235 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=bundle*../full.hg (glob)
236 (run 'hg heads' to see heads, 'hg merge' to merge)
236 (run 'hg heads' to see heads, 'hg merge' to merge)
237
237
238 Rollback empty
238 Rollback empty
239
239
240 $ hg rollback
240 $ hg rollback
241 repository tip rolled back to revision -1 (undo pull)
241 repository tip rolled back to revision -1 (undo pull)
242 $ cd ..
242 $ cd ..
243
243
244 Log -R bundle:empty+full.hg
244 Log -R bundle:empty+full.hg
245
245
246 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
246 $ hg -R bundle:empty+full.hg log --template="{rev} "; echo ""
247 8 7 6 5 4 3 2 1 0
247 8 7 6 5 4 3 2 1 0
248
248
249 Pull full.hg into empty again (using -R; with hook)
249 Pull full.hg into empty again (using -R; with hook)
250
250
251 $ hg -R empty pull full.hg
251 $ hg -R empty pull full.hg
252 pulling from full.hg
252 pulling from full.hg
253 requesting all changes
253 requesting all changes
254 adding changesets
254 adding changesets
255 adding manifests
255 adding manifests
256 adding file changes
256 adding file changes
257 added 9 changesets with 7 changes to 4 files (+1 heads)
257 added 9 changesets with 7 changes to 4 files (+1 heads)
258 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
258 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
259 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=bundle:empty+full.hg
259 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=f9ee2f85a263049e9ae6d37a0e67e96194ffb735 HG_NODE_LAST=aa35859c02ea8bd48da5da68cd2740ac71afcbaf HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=bundle:empty+full.hg
260 (run 'hg heads' to see heads, 'hg merge' to merge)
260 (run 'hg heads' to see heads, 'hg merge' to merge)
261
261
262 #endif
262 #endif
263
263
264 Cannot produce streaming clone bundles with "hg bundle"
264 Cannot produce streaming clone bundles with "hg bundle"
265
265
266 $ hg -R test bundle -t packed1 packed.hg
266 $ hg -R test bundle -t packed1 packed.hg
267 abort: packed bundles cannot be produced by "hg bundle"
267 abort: packed bundles cannot be produced by "hg bundle"
268 (use 'hg debugcreatestreamclonebundle')
268 (use 'hg debugcreatestreamclonebundle')
269 [255]
269 [255]
270
270
271 packed1 is produced properly
271 packed1 is produced properly
272
272
273 #if reporevlogstore
273 #if reporevlogstore
274
274
275 $ hg -R test debugcreatestreamclonebundle packed.hg
275 $ hg -R test debugcreatestreamclonebundle packed.hg
276 writing 2664 bytes for 6 files
276 writing 2664 bytes for 6 files
277 bundle requirements: generaldelta, revlogv1
277 bundle requirements: generaldelta, revlogv1
278
278
279 $ f -B 64 --size --sha1 --hexdump packed.hg
279 $ f -B 64 --size --sha1 --hexdump packed.hg
280 packed.hg: size=2827, sha1=9d14cb90c66a21462d915ab33656f38b9deed686
280 packed.hg: size=2827, sha1=9d14cb90c66a21462d915ab33656f38b9deed686
281 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
281 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
282 0010: 00 00 00 00 0a 68 00 16 67 65 6e 65 72 61 6c 64 |.....h..generald|
282 0010: 00 00 00 00 0a 68 00 16 67 65 6e 65 72 61 6c 64 |.....h..generald|
283 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 00 64 61 |elta,revlogv1.da|
283 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 00 64 61 |elta,revlogv1.da|
284 0030: 74 61 2f 61 64 69 66 66 65 72 65 6e 74 66 69 6c |ta/adifferentfil|
284 0030: 74 61 2f 61 64 69 66 66 65 72 65 6e 74 66 69 6c |ta/adifferentfil|
285
285
286 $ hg debugbundle --spec packed.hg
286 $ hg debugbundle --spec packed.hg
287 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1
287 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1
288
288
289 generaldelta requirement is not listed in stream clone bundles unless used
289 generaldelta requirement is not listed in stream clone bundles unless used
290
290
291 $ hg --config format.usegeneraldelta=false init testnongd
291 $ hg --config format.usegeneraldelta=false init testnongd
292 $ cd testnongd
292 $ cd testnongd
293 $ touch foo
293 $ touch foo
294 $ hg -q commit -A -m initial
294 $ hg -q commit -A -m initial
295 $ cd ..
295 $ cd ..
296 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
296 $ hg -R testnongd debugcreatestreamclonebundle packednongd.hg
297 writing 301 bytes for 3 files
297 writing 301 bytes for 3 files
298 bundle requirements: revlogv1
298 bundle requirements: revlogv1
299
299
300 $ f -B 64 --size --sha1 --hexdump packednongd.hg
300 $ f -B 64 --size --sha1 --hexdump packednongd.hg
301 packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f
301 packednongd.hg: size=383, sha1=1d9c230238edd5d38907100b729ba72b1831fe6f
302 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
302 0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 03 00 00 |HGS1UN..........|
303 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1|
303 0010: 00 00 00 00 01 2d 00 09 72 65 76 6c 6f 67 76 31 |.....-..revlogv1|
304 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..|
304 0020: 00 64 61 74 61 2f 66 6f 6f 2e 69 00 36 34 0a 00 |.data/foo.i.64..|
305 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
305 0030: 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
306
306
307 $ hg debugbundle --spec packednongd.hg
307 $ hg debugbundle --spec packednongd.hg
308 none-packed1;requirements%3Drevlogv1
308 none-packed1;requirements%3Drevlogv1
309
309
310 Warning emitted when packed bundles contain secret changesets
310 Warning emitted when packed bundles contain secret changesets
311
311
312 $ hg init testsecret
312 $ hg init testsecret
313 $ cd testsecret
313 $ cd testsecret
314 $ touch foo
314 $ touch foo
315 $ hg -q commit -A -m initial
315 $ hg -q commit -A -m initial
316 $ hg phase --force --secret -r .
316 $ hg phase --force --secret -r .
317 $ cd ..
317 $ cd ..
318
318
319 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
319 $ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
320 (warning: stream clone bundle will contain secret revisions)
320 (warning: stream clone bundle will contain secret revisions)
321 writing 301 bytes for 3 files
321 writing 301 bytes for 3 files
322 bundle requirements: generaldelta, revlogv1
322 bundle requirements: generaldelta, revlogv1
323
323
324 Unpacking packed1 bundles with "hg unbundle" isn't allowed
324 Unpacking packed1 bundles with "hg unbundle" isn't allowed
325
325
326 $ hg init packed
326 $ hg init packed
327 $ hg -R packed unbundle packed.hg
327 $ hg -R packed unbundle packed.hg
328 abort: packed bundles cannot be applied with "hg unbundle"
328 abort: packed bundles cannot be applied with "hg unbundle"
329 (use "hg debugapplystreamclonebundle")
329 (use "hg debugapplystreamclonebundle")
330 [255]
330 [255]
331
331
332 packed1 can be consumed from debug command
332 packed1 can be consumed from debug command
333
333
334 (this also confirms that streamclone-ed changes are visible via
334 (this also confirms that streamclone-ed changes are visible via
335 @filecache properties to in-process procedures before closing
335 @filecache properties to in-process procedures before closing
336 transaction)
336 transaction)
337
337
338 $ cat > $TESTTMP/showtip.py <<EOF
338 $ cat > $TESTTMP/showtip.py <<EOF
339 > from __future__ import absolute_import
339 > from __future__ import absolute_import
340 >
340 >
341 > def showtip(ui, repo, hooktype, **kwargs):
341 > def showtip(ui, repo, hooktype, **kwargs):
342 > ui.warn(b'%s: %s\n' % (hooktype, repo[b'tip'].hex()[:12]))
342 > ui.warn(b'%s: %s\n' % (hooktype, repo[b'tip'].hex()[:12]))
343 >
343 >
344 > def reposetup(ui, repo):
344 > def reposetup(ui, repo):
345 > # this confirms (and ensures) that (empty) 00changelog.i
345 > # this confirms (and ensures) that (empty) 00changelog.i
346 > # before streamclone is already cached as repo.changelog
346 > # before streamclone is already cached as repo.changelog
347 > ui.setconfig(b'hooks', b'pretxnopen.showtip', showtip)
347 > ui.setconfig(b'hooks', b'pretxnopen.showtip', showtip)
348 >
348 >
349 > # this confirms that streamclone-ed changes are visible to
349 > # this confirms that streamclone-ed changes are visible to
350 > # in-process procedures before closing transaction
350 > # in-process procedures before closing transaction
351 > ui.setconfig(b'hooks', b'pretxnclose.showtip', showtip)
351 > ui.setconfig(b'hooks', b'pretxnclose.showtip', showtip)
352 >
352 >
353 > # this confirms that streamclone-ed changes are still visible
353 > # this confirms that streamclone-ed changes are still visible
354 > # after closing transaction
354 > # after closing transaction
355 > ui.setconfig(b'hooks', b'txnclose.showtip', showtip)
355 > ui.setconfig(b'hooks', b'txnclose.showtip', showtip)
356 > EOF
356 > EOF
357 $ cat >> $HGRCPATH <<EOF
357 $ cat >> $HGRCPATH <<EOF
358 > [extensions]
358 > [extensions]
359 > showtip = $TESTTMP/showtip.py
359 > showtip = $TESTTMP/showtip.py
360 > EOF
360 > EOF
361
361
362 $ hg -R packed debugapplystreamclonebundle packed.hg
362 $ hg -R packed debugapplystreamclonebundle packed.hg
363 6 files to transfer, 2.60 KB of data
363 6 files to transfer, 2.60 KB of data
364 pretxnopen: 000000000000
364 pretxnopen: 000000000000
365 pretxnclose: aa35859c02ea
365 pretxnclose: aa35859c02ea
366 transferred 2.60 KB in *.* seconds (* */sec) (glob)
366 transferred 2.60 KB in *.* seconds (* */sec) (glob)
367 txnclose: aa35859c02ea
367 txnclose: aa35859c02ea
368
368
369 (for safety, confirm visibility of streamclone-ed changes by another
369 (for safety, confirm visibility of streamclone-ed changes by another
370 process, too)
370 process, too)
371
371
372 $ hg -R packed tip -T "{node|short}\n"
372 $ hg -R packed tip -T "{node|short}\n"
373 aa35859c02ea
373 aa35859c02ea
374
374
375 $ cat >> $HGRCPATH <<EOF
375 $ cat >> $HGRCPATH <<EOF
376 > [extensions]
376 > [extensions]
377 > showtip = !
377 > showtip = !
378 > EOF
378 > EOF
379
379
380 Does not work on non-empty repo
380 Does not work on non-empty repo
381
381
382 $ hg -R packed debugapplystreamclonebundle packed.hg
382 $ hg -R packed debugapplystreamclonebundle packed.hg
383 abort: cannot apply stream clone bundle on non-empty repo
383 abort: cannot apply stream clone bundle on non-empty repo
384 [255]
384 [255]
385
385
386 #endif
386 #endif
387
387
388 Create partial clones
388 Create partial clones
389
389
390 $ rm -r empty
390 $ rm -r empty
391 $ hg init empty
391 $ hg init empty
392 $ hg clone -r 3 test partial
392 $ hg clone -r 3 test partial
393 adding changesets
393 adding changesets
394 adding manifests
394 adding manifests
395 adding file changes
395 adding file changes
396 added 4 changesets with 4 changes to 1 files
396 added 4 changesets with 4 changes to 1 files
397 new changesets f9ee2f85a263:eebf5a27f8ca
397 new changesets f9ee2f85a263:eebf5a27f8ca
398 updating to branch default
398 updating to branch default
399 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
399 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
400 $ hg clone partial partial2
400 $ hg clone partial partial2
401 updating to branch default
401 updating to branch default
402 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
402 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
403 $ cd partial
403 $ cd partial
404
404
405 #if repobundlerepo
405 #if repobundlerepo
406
406
407 Log -R full.hg in partial
407 Log -R full.hg in partial
408
408
409 $ hg -R bundle://../full.hg log -T phases
409 $ hg -R bundle://../full.hg log -T phases
410 changeset: 8:aa35859c02ea
410 changeset: 8:aa35859c02ea
411 tag: tip
411 tag: tip
412 phase: draft
412 phase: draft
413 parent: 3:eebf5a27f8ca
413 parent: 3:eebf5a27f8ca
414 user: test
414 user: test
415 date: Thu Jan 01 00:00:00 1970 +0000
415 date: Thu Jan 01 00:00:00 1970 +0000
416 summary: 0.3m
416 summary: 0.3m
417
417
418 changeset: 7:a6a34bfa0076
418 changeset: 7:a6a34bfa0076
419 phase: draft
419 phase: draft
420 user: test
420 user: test
421 date: Thu Jan 01 00:00:00 1970 +0000
421 date: Thu Jan 01 00:00:00 1970 +0000
422 summary: 1.3m
422 summary: 1.3m
423
423
424 changeset: 6:7373c1169842
424 changeset: 6:7373c1169842
425 phase: draft
425 phase: draft
426 user: test
426 user: test
427 date: Thu Jan 01 00:00:00 1970 +0000
427 date: Thu Jan 01 00:00:00 1970 +0000
428 summary: 1.3
428 summary: 1.3
429
429
430 changeset: 5:1bb50a9436a7
430 changeset: 5:1bb50a9436a7
431 phase: draft
431 phase: draft
432 user: test
432 user: test
433 date: Thu Jan 01 00:00:00 1970 +0000
433 date: Thu Jan 01 00:00:00 1970 +0000
434 summary: 1.2
434 summary: 1.2
435
435
436 changeset: 4:095197eb4973
436 changeset: 4:095197eb4973
437 phase: draft
437 phase: draft
438 parent: 0:f9ee2f85a263
438 parent: 0:f9ee2f85a263
439 user: test
439 user: test
440 date: Thu Jan 01 00:00:00 1970 +0000
440 date: Thu Jan 01 00:00:00 1970 +0000
441 summary: 1.1
441 summary: 1.1
442
442
443 changeset: 3:eebf5a27f8ca
443 changeset: 3:eebf5a27f8ca
444 phase: public
444 phase: public
445 user: test
445 user: test
446 date: Thu Jan 01 00:00:00 1970 +0000
446 date: Thu Jan 01 00:00:00 1970 +0000
447 summary: 0.3
447 summary: 0.3
448
448
449 changeset: 2:e38ba6f5b7e0
449 changeset: 2:e38ba6f5b7e0
450 phase: public
450 phase: public
451 user: test
451 user: test
452 date: Thu Jan 01 00:00:00 1970 +0000
452 date: Thu Jan 01 00:00:00 1970 +0000
453 summary: 0.2
453 summary: 0.2
454
454
455 changeset: 1:34c2bf6b0626
455 changeset: 1:34c2bf6b0626
456 phase: public
456 phase: public
457 user: test
457 user: test
458 date: Thu Jan 01 00:00:00 1970 +0000
458 date: Thu Jan 01 00:00:00 1970 +0000
459 summary: 0.1
459 summary: 0.1
460
460
461 changeset: 0:f9ee2f85a263
461 changeset: 0:f9ee2f85a263
462 phase: public
462 phase: public
463 user: test
463 user: test
464 date: Thu Jan 01 00:00:00 1970 +0000
464 date: Thu Jan 01 00:00:00 1970 +0000
465 summary: 0.0
465 summary: 0.0
466
466
467
467
468 Incoming full.hg in partial
468 Incoming full.hg in partial
469
469
470 $ hg incoming bundle://../full.hg
470 $ hg incoming bundle://../full.hg
471 comparing with bundle:../full.hg
471 comparing with bundle:../full.hg
472 searching for changes
472 searching for changes
473 changeset: 4:095197eb4973
473 changeset: 4:095197eb4973
474 parent: 0:f9ee2f85a263
474 parent: 0:f9ee2f85a263
475 user: test
475 user: test
476 date: Thu Jan 01 00:00:00 1970 +0000
476 date: Thu Jan 01 00:00:00 1970 +0000
477 summary: 1.1
477 summary: 1.1
478
478
479 changeset: 5:1bb50a9436a7
479 changeset: 5:1bb50a9436a7
480 user: test
480 user: test
481 date: Thu Jan 01 00:00:00 1970 +0000
481 date: Thu Jan 01 00:00:00 1970 +0000
482 summary: 1.2
482 summary: 1.2
483
483
484 changeset: 6:7373c1169842
484 changeset: 6:7373c1169842
485 user: test
485 user: test
486 date: Thu Jan 01 00:00:00 1970 +0000
486 date: Thu Jan 01 00:00:00 1970 +0000
487 summary: 1.3
487 summary: 1.3
488
488
489 changeset: 7:a6a34bfa0076
489 changeset: 7:a6a34bfa0076
490 user: test
490 user: test
491 date: Thu Jan 01 00:00:00 1970 +0000
491 date: Thu Jan 01 00:00:00 1970 +0000
492 summary: 1.3m
492 summary: 1.3m
493
493
494 changeset: 8:aa35859c02ea
494 changeset: 8:aa35859c02ea
495 tag: tip
495 tag: tip
496 parent: 3:eebf5a27f8ca
496 parent: 3:eebf5a27f8ca
497 user: test
497 user: test
498 date: Thu Jan 01 00:00:00 1970 +0000
498 date: Thu Jan 01 00:00:00 1970 +0000
499 summary: 0.3m
499 summary: 0.3m
500
500
501
501
502 Outgoing -R full.hg vs partial2 in partial
502 Outgoing -R full.hg vs partial2 in partial
503
503
504 $ hg -R bundle://../full.hg outgoing ../partial2
504 $ hg -R bundle://../full.hg outgoing ../partial2
505 comparing with ../partial2
505 comparing with ../partial2
506 searching for changes
506 searching for changes
507 changeset: 4:095197eb4973
507 changeset: 4:095197eb4973
508 parent: 0:f9ee2f85a263
508 parent: 0:f9ee2f85a263
509 user: test
509 user: test
510 date: Thu Jan 01 00:00:00 1970 +0000
510 date: Thu Jan 01 00:00:00 1970 +0000
511 summary: 1.1
511 summary: 1.1
512
512
513 changeset: 5:1bb50a9436a7
513 changeset: 5:1bb50a9436a7
514 user: test
514 user: test
515 date: Thu Jan 01 00:00:00 1970 +0000
515 date: Thu Jan 01 00:00:00 1970 +0000
516 summary: 1.2
516 summary: 1.2
517
517
518 changeset: 6:7373c1169842
518 changeset: 6:7373c1169842
519 user: test
519 user: test
520 date: Thu Jan 01 00:00:00 1970 +0000
520 date: Thu Jan 01 00:00:00 1970 +0000
521 summary: 1.3
521 summary: 1.3
522
522
523 changeset: 7:a6a34bfa0076
523 changeset: 7:a6a34bfa0076
524 user: test
524 user: test
525 date: Thu Jan 01 00:00:00 1970 +0000
525 date: Thu Jan 01 00:00:00 1970 +0000
526 summary: 1.3m
526 summary: 1.3m
527
527
528 changeset: 8:aa35859c02ea
528 changeset: 8:aa35859c02ea
529 tag: tip
529 tag: tip
530 parent: 3:eebf5a27f8ca
530 parent: 3:eebf5a27f8ca
531 user: test
531 user: test
532 date: Thu Jan 01 00:00:00 1970 +0000
532 date: Thu Jan 01 00:00:00 1970 +0000
533 summary: 0.3m
533 summary: 0.3m
534
534
535
535
536 Outgoing -R does-not-exist.hg vs partial2 in partial
536 Outgoing -R does-not-exist.hg vs partial2 in partial
537
537
538 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
538 $ hg -R bundle://../does-not-exist.hg outgoing ../partial2
539 abort: *../does-not-exist.hg* (glob)
539 abort: *../does-not-exist.hg* (glob)
540 [255]
540 [255]
541
541
542 #endif
542 #endif
543
543
544 $ cd ..
544 $ cd ..
545
545
546 hide outer repo
546 hide outer repo
547 $ hg init
547 $ hg init
548
548
549 Direct clone from bundle (all-history)
549 Direct clone from bundle (all-history)
550
550
551 #if repobundlerepo
551 #if repobundlerepo
552
552
553 $ hg clone full.hg full-clone
553 $ hg clone full.hg full-clone
554 requesting all changes
554 requesting all changes
555 adding changesets
555 adding changesets
556 adding manifests
556 adding manifests
557 adding file changes
557 adding file changes
558 added 9 changesets with 7 changes to 4 files (+1 heads)
558 added 9 changesets with 7 changes to 4 files (+1 heads)
559 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
559 new changesets f9ee2f85a263:aa35859c02ea (9 drafts)
560 updating to branch default
560 updating to branch default
561 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
561 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
562 $ hg -R full-clone heads
562 $ hg -R full-clone heads
563 changeset: 8:aa35859c02ea
563 changeset: 8:aa35859c02ea
564 tag: tip
564 tag: tip
565 parent: 3:eebf5a27f8ca
565 parent: 3:eebf5a27f8ca
566 user: test
566 user: test
567 date: Thu Jan 01 00:00:00 1970 +0000
567 date: Thu Jan 01 00:00:00 1970 +0000
568 summary: 0.3m
568 summary: 0.3m
569
569
570 changeset: 7:a6a34bfa0076
570 changeset: 7:a6a34bfa0076
571 user: test
571 user: test
572 date: Thu Jan 01 00:00:00 1970 +0000
572 date: Thu Jan 01 00:00:00 1970 +0000
573 summary: 1.3m
573 summary: 1.3m
574
574
575 $ rm -r full-clone
575 $ rm -r full-clone
576
576
577 When cloning from a non-copiable repository into '', do not
577 When cloning from a non-copiable repository into '', do not
578 recurse infinitely (issue2528)
578 recurse infinitely (issue2528)
579
579
580 $ hg clone full.hg ''
580 $ hg clone full.hg ''
581 abort: empty destination path is not valid
581 abort: empty destination path is not valid
582 [255]
582 [255]
583
583
584 test for https://bz.mercurial-scm.org/216
584 test for https://bz.mercurial-scm.org/216
585
585
586 Unbundle incremental bundles into fresh empty in one go
586 Unbundle incremental bundles into fresh empty in one go
587
587
588 $ rm -r empty
588 $ rm -r empty
589 $ hg init empty
589 $ hg init empty
590 $ hg -R test bundle --base null -r 0 ../0.hg
590 $ hg -R test bundle --base null -r 0 ../0.hg
591 1 changesets found
591 1 changesets found
592 $ hg -R test bundle --base 0 -r 1 ../1.hg
592 $ hg -R test bundle --base 0 -r 1 ../1.hg
593 1 changesets found
593 1 changesets found
594 $ hg -R empty unbundle -u ../0.hg ../1.hg
594 $ hg -R empty unbundle -u ../0.hg ../1.hg
595 adding changesets
595 adding changesets
596 adding manifests
596 adding manifests
597 adding file changes
597 adding file changes
598 added 1 changesets with 1 changes to 1 files
598 added 1 changesets with 1 changes to 1 files
599 new changesets f9ee2f85a263 (1 drafts)
599 new changesets f9ee2f85a263 (1 drafts)
600 adding changesets
600 adding changesets
601 adding manifests
601 adding manifests
602 adding file changes
602 adding file changes
603 added 1 changesets with 1 changes to 1 files
603 added 1 changesets with 1 changes to 1 files
604 new changesets 34c2bf6b0626 (1 drafts)
604 new changesets 34c2bf6b0626 (1 drafts)
605 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
605 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
606
606
607 View full contents of the bundle
607 View full contents of the bundle
608 $ hg -R test bundle --base null -r 3 ../partial.hg
608 $ hg -R test bundle --base null -r 3 ../partial.hg
609 4 changesets found
609 4 changesets found
610 $ cd test
610 $ cd test
611 $ hg -R ../../partial.hg log -r "bundle()"
611 $ hg -R ../../partial.hg log -r "bundle()"
612 changeset: 0:f9ee2f85a263
612 changeset: 0:f9ee2f85a263
613 user: test
613 user: test
614 date: Thu Jan 01 00:00:00 1970 +0000
614 date: Thu Jan 01 00:00:00 1970 +0000
615 summary: 0.0
615 summary: 0.0
616
616
617 changeset: 1:34c2bf6b0626
617 changeset: 1:34c2bf6b0626
618 user: test
618 user: test
619 date: Thu Jan 01 00:00:00 1970 +0000
619 date: Thu Jan 01 00:00:00 1970 +0000
620 summary: 0.1
620 summary: 0.1
621
621
622 changeset: 2:e38ba6f5b7e0
622 changeset: 2:e38ba6f5b7e0
623 user: test
623 user: test
624 date: Thu Jan 01 00:00:00 1970 +0000
624 date: Thu Jan 01 00:00:00 1970 +0000
625 summary: 0.2
625 summary: 0.2
626
626
627 changeset: 3:eebf5a27f8ca
627 changeset: 3:eebf5a27f8ca
628 user: test
628 user: test
629 date: Thu Jan 01 00:00:00 1970 +0000
629 date: Thu Jan 01 00:00:00 1970 +0000
630 summary: 0.3
630 summary: 0.3
631
631
632 $ cd ..
632 $ cd ..
633
633
634 #endif
634 #endif
635
635
636 test for 540d1059c802
636 test for 540d1059c802
637
637
638 $ hg init orig
638 $ hg init orig
639 $ cd orig
639 $ cd orig
640 $ echo foo > foo
640 $ echo foo > foo
641 $ hg add foo
641 $ hg add foo
642 $ hg ci -m 'add foo'
642 $ hg ci -m 'add foo'
643
643
644 $ hg clone . ../copy
644 $ hg clone . ../copy
645 updating to branch default
645 updating to branch default
646 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
646 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
647 $ hg tag foo
647 $ hg tag foo
648
648
649 $ cd ../copy
649 $ cd ../copy
650 $ echo >> foo
650 $ echo >> foo
651 $ hg ci -m 'change foo'
651 $ hg ci -m 'change foo'
652 $ hg bundle ../bundle.hg ../orig
652 $ hg bundle ../bundle.hg ../orig
653 searching for changes
653 searching for changes
654 1 changesets found
654 1 changesets found
655
655
656 $ cd ..
656 $ cd ..
657
657
658 #if repobundlerepo
658 #if repobundlerepo
659 $ cd orig
659 $ cd orig
660 $ hg incoming ../bundle.hg
660 $ hg incoming ../bundle.hg
661 comparing with ../bundle.hg
661 comparing with ../bundle.hg
662 searching for changes
662 searching for changes
663 changeset: 2:ed1b79f46b9a
663 changeset: 2:ed1b79f46b9a
664 tag: tip
664 tag: tip
665 parent: 0:bbd179dfa0a7
665 parent: 0:bbd179dfa0a7
666 user: test
666 user: test
667 date: Thu Jan 01 00:00:00 1970 +0000
667 date: Thu Jan 01 00:00:00 1970 +0000
668 summary: change foo
668 summary: change foo
669
669
670 $ cd ..
670 $ cd ..
671
671
672 test bundle with # in the filename (issue2154):
672 test bundle with # in the filename (issue2154):
673
673
674 $ cp bundle.hg 'test#bundle.hg'
674 $ cp bundle.hg 'test#bundle.hg'
675 $ cd orig
675 $ cd orig
676 $ hg incoming '../test#bundle.hg'
676 $ hg incoming '../test#bundle.hg'
677 comparing with ../test
677 comparing with ../test
678 abort: unknown revision 'bundle.hg'!
678 abort: unknown revision 'bundle.hg'!
679 [255]
679 [255]
680
680
681 note that percent encoding is not handled:
681 note that percent encoding is not handled:
682
682
683 $ hg incoming ../test%23bundle.hg
683 $ hg incoming ../test%23bundle.hg
684 abort: repository ../test%23bundle.hg not found!
684 abort: repository ../test%23bundle.hg not found!
685 [255]
685 [255]
686 $ cd ..
686 $ cd ..
687
687
688 #endif
688 #endif
689
689
690 test to bundle revisions on the newly created branch (issue3828):
690 test to bundle revisions on the newly created branch (issue3828):
691
691
692 $ hg -q clone -U test test-clone
692 $ hg -q clone -U test test-clone
693 $ cd test
693 $ cd test
694
694
695 $ hg -q branch foo
695 $ hg -q branch foo
696 $ hg commit -m "create foo branch"
696 $ hg commit -m "create foo branch"
697 $ hg -q outgoing ../test-clone
697 $ hg -q outgoing ../test-clone
698 9:b4f5acb1ee27
698 9:b4f5acb1ee27
699 $ hg -q bundle --branch foo foo.hg ../test-clone
699 $ hg -q bundle --branch foo foo.hg ../test-clone
700 #if repobundlerepo
700 #if repobundlerepo
701 $ hg -R foo.hg -q log -r "bundle()"
701 $ hg -R foo.hg -q log -r "bundle()"
702 9:b4f5acb1ee27
702 9:b4f5acb1ee27
703 #endif
703 #endif
704
704
705 $ cd ..
705 $ cd ..
706
706
707 test for https://bz.mercurial-scm.org/1144
707 test for https://bz.mercurial-scm.org/1144
708
708
709 test that verify bundle does not traceback
709 test that verify bundle does not traceback
710
710
711 partial history bundle, fails w/ unknown parent
711 partial history bundle, fails w/ unknown parent
712
712
713 $ hg -R bundle.hg verify
713 $ hg -R bundle.hg verify
714 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
714 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
715 [255]
715 [255]
716
716
717 full history bundle, refuses to verify non-local repo
717 full history bundle, refuses to verify non-local repo
718
718
719 #if repobundlerepo
719 #if repobundlerepo
720 $ hg -R all.hg verify
720 $ hg -R all.hg verify
721 abort: cannot verify bundle or remote repos
721 abort: cannot verify bundle or remote repos
722 [255]
722 [255]
723 #endif
723 #endif
724
724
725 but, regular verify must continue to work
725 but, regular verify must continue to work
726
726
727 $ hg -R orig verify
727 $ hg -R orig verify
728 checking changesets
728 checking changesets
729 checking manifests
729 checking manifests
730 crosschecking files in changesets and manifests
730 crosschecking files in changesets and manifests
731 checking files
731 checking files
732 checked 2 changesets with 2 changes to 2 files
732 checked 2 changesets with 2 changes to 2 files
733
733
734 #if repobundlerepo
734 #if repobundlerepo
735 diff against bundle
735 diff against bundle
736
736
737 $ hg init b
737 $ hg init b
738 $ cd b
738 $ cd b
739 $ hg -R ../all.hg diff -r tip
739 $ hg -R ../all.hg diff -r tip
740 diff -r aa35859c02ea anotherfile
740 diff -r aa35859c02ea anotherfile
741 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
741 --- a/anotherfile Thu Jan 01 00:00:00 1970 +0000
742 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
742 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
743 @@ -1,4 +0,0 @@
743 @@ -1,4 +0,0 @@
744 -0
744 -0
745 -1
745 -1
746 -2
746 -2
747 -3
747 -3
748 $ cd ..
748 $ cd ..
749 #endif
749 #endif
750
750
751 bundle single branch
751 bundle single branch
752
752
753 $ hg init branchy
753 $ hg init branchy
754 $ cd branchy
754 $ cd branchy
755 $ echo a >a
755 $ echo a >a
756 $ echo x >x
756 $ echo x >x
757 $ hg ci -Ama
757 $ hg ci -Ama
758 adding a
758 adding a
759 adding x
759 adding x
760 $ echo c >c
760 $ echo c >c
761 $ echo xx >x
761 $ echo xx >x
762 $ hg ci -Amc
762 $ hg ci -Amc
763 adding c
763 adding c
764 $ echo c1 >c1
764 $ echo c1 >c1
765 $ hg ci -Amc1
765 $ hg ci -Amc1
766 adding c1
766 adding c1
767 $ hg up 0
767 $ hg up 0
768 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
768 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
769 $ echo b >b
769 $ echo b >b
770 $ hg ci -Amb
770 $ hg ci -Amb
771 adding b
771 adding b
772 created new head
772 created new head
773 $ echo b1 >b1
773 $ echo b1 >b1
774 $ echo xx >x
774 $ echo xx >x
775 $ hg ci -Amb1
775 $ hg ci -Amb1
776 adding b1
776 adding b1
777 $ hg clone -q -r2 . part
777 $ hg clone -q -r2 . part
778
778
779 == bundling via incoming
779 == bundling via incoming
780
780
781 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
781 $ hg in -R part --bundle incoming.hg --template "{node}\n" .
782 comparing with .
782 comparing with .
783 searching for changes
783 searching for changes
784 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
784 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
785 057f4db07f61970e1c11e83be79e9d08adc4dc31
785 057f4db07f61970e1c11e83be79e9d08adc4dc31
786
786
787 == bundling
787 == bundling
788
788
789 $ hg bundle bundle.hg part --debug --config progress.debug=true
789 $ hg bundle bundle.hg part --debug --config progress.debug=true
790 query 1; heads
790 query 1; heads
791 searching for changes
791 searching for changes
792 all remote heads known locally
792 all remote heads known locally
793 2 changesets found
793 2 changesets found
794 list of changesets:
794 list of changesets:
795 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
795 1a38c1b849e8b70c756d2d80b0b9a3ac0b7ea11a
796 057f4db07f61970e1c11e83be79e9d08adc4dc31
796 057f4db07f61970e1c11e83be79e9d08adc4dc31
797 bundle2-output-bundle: "HG20", (1 params) 2 parts total
797 bundle2-output-bundle: "HG20", (1 params) 2 parts total
798 bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
798 bundle2-output-part: "changegroup" (params: 1 mandatory 1 advisory) streamed payload
799 changesets: 1/2 chunks (50.00%)
799 changesets: 1/2 chunks (50.00%)
800 changesets: 2/2 chunks (100.00%)
800 changesets: 2/2 chunks (100.00%)
801 manifests: 1/2 chunks (50.00%)
801 manifests: 1/2 chunks (50.00%)
802 manifests: 2/2 chunks (100.00%)
802 manifests: 2/2 chunks (100.00%)
803 files: b 1/3 files (33.33%)
803 files: b 1/3 files (33.33%)
804 files: b1 2/3 files (66.67%)
804 files: b1 2/3 files (66.67%)
805 files: x 3/3 files (100.00%)
805 files: x 3/3 files (100.00%)
806 bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
806 bundle2-output-part: "cache:rev-branch-cache" (advisory) streamed payload
807
807
808 #if repobundlerepo
808 #if repobundlerepo
809 == Test for issue3441
809 == Test for issue3441
810
810
811 $ hg clone -q -r0 . part2
811 $ hg clone -q -r0 . part2
812 $ hg -q -R part2 pull bundle.hg
812 $ hg -q -R part2 pull bundle.hg
813 $ hg -R part2 verify
813 $ hg -R part2 verify
814 checking changesets
814 checking changesets
815 checking manifests
815 checking manifests
816 crosschecking files in changesets and manifests
816 crosschecking files in changesets and manifests
817 checking files
817 checking files
818 checked 3 changesets with 5 changes to 4 files
818 checked 3 changesets with 5 changes to 4 files
819 #endif
819 #endif
820
820
821 == Test bundling no commits
821 == Test bundling no commits
822
822
823 $ hg bundle -r 'public()' no-output.hg
823 $ hg bundle -r 'public()' no-output.hg
824 abort: no commits to bundle
824 abort: no commits to bundle
825 [255]
825 [255]
826
826
827 $ cd ..
827 $ cd ..
828
828
829 When user merges to the revision existing only in the bundle,
829 When user merges to the revision existing only in the bundle,
830 it should show warning that second parent of the working
830 it should show warning that second parent of the working
831 directory does not exist
831 directory does not exist
832
832
833 $ hg init update2bundled
833 $ hg init update2bundled
834 $ cd update2bundled
834 $ cd update2bundled
835 $ cat <<EOF >> .hg/hgrc
835 $ cat <<EOF >> .hg/hgrc
836 > [extensions]
836 > [extensions]
837 > strip =
837 > strip =
838 > EOF
838 > EOF
839 $ echo "aaa" >> a
839 $ echo "aaa" >> a
840 $ hg commit -A -m 0
840 $ hg commit -A -m 0
841 adding a
841 adding a
842 $ echo "bbb" >> b
842 $ echo "bbb" >> b
843 $ hg commit -A -m 1
843 $ hg commit -A -m 1
844 adding b
844 adding b
845 $ echo "ccc" >> c
845 $ echo "ccc" >> c
846 $ hg commit -A -m 2
846 $ hg commit -A -m 2
847 adding c
847 adding c
848 $ hg update -r 1
848 $ hg update -r 1
849 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
849 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
850 $ echo "ddd" >> d
850 $ echo "ddd" >> d
851 $ hg commit -A -m 3
851 $ hg commit -A -m 3
852 adding d
852 adding d
853 created new head
853 created new head
854 $ hg update -r 2
854 $ hg update -r 2
855 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
855 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
856 $ hg log -G
856 $ hg log -G
857 o changeset: 3:8bd3e1f196af
857 o changeset: 3:8bd3e1f196af
858 | tag: tip
858 | tag: tip
859 | parent: 1:a01eca7af26d
859 | parent: 1:a01eca7af26d
860 | user: test
860 | user: test
861 | date: Thu Jan 01 00:00:00 1970 +0000
861 | date: Thu Jan 01 00:00:00 1970 +0000
862 | summary: 3
862 | summary: 3
863 |
863 |
864 | @ changeset: 2:4652c276ac4f
864 | @ changeset: 2:4652c276ac4f
865 |/ user: test
865 |/ user: test
866 | date: Thu Jan 01 00:00:00 1970 +0000
866 | date: Thu Jan 01 00:00:00 1970 +0000
867 | summary: 2
867 | summary: 2
868 |
868 |
869 o changeset: 1:a01eca7af26d
869 o changeset: 1:a01eca7af26d
870 | user: test
870 | user: test
871 | date: Thu Jan 01 00:00:00 1970 +0000
871 | date: Thu Jan 01 00:00:00 1970 +0000
872 | summary: 1
872 | summary: 1
873 |
873 |
874 o changeset: 0:4fe08cd4693e
874 o changeset: 0:4fe08cd4693e
875 user: test
875 user: test
876 date: Thu Jan 01 00:00:00 1970 +0000
876 date: Thu Jan 01 00:00:00 1970 +0000
877 summary: 0
877 summary: 0
878
878
879
879
880 #if repobundlerepo
880 #if repobundlerepo
881 $ hg bundle --base 1 -r 3 ../update2bundled.hg
881 $ hg bundle --base 1 -r 3 ../update2bundled.hg
882 1 changesets found
882 1 changesets found
883 $ hg strip -r 3
883 $ hg strip -r 3
884 saved backup bundle to $TESTTMP/update2bundled/.hg/strip-backup/8bd3e1f196af-017e56d8-backup.hg
884 saved backup bundle to $TESTTMP/update2bundled/.hg/strip-backup/8bd3e1f196af-017e56d8-backup.hg
885 $ hg merge -R ../update2bundled.hg -r 3
885 $ hg merge -R ../update2bundled.hg -r 3
886 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
886 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
887 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
887 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
888 (branch merge, don't forget to commit)
888 (branch merge, don't forget to commit)
889
889
890 When user updates to the revision existing only in the bundle,
890 When user updates to the revision existing only in the bundle,
891 it should show warning
891 it should show warning
892
892
893 $ hg update -R ../update2bundled.hg --clean -r 3
893 $ hg update -R ../update2bundled.hg --clean -r 3
894 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
894 setting parent to node 8bd3e1f196af289b2b121be08031e76d7ae92098 that only exists in the bundle
895 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
895 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
896
896
897 When user updates to the revision existing in the local repository
897 When user updates to the revision existing in the local repository
898 the warning shouldn't be emitted
898 the warning shouldn't be emitted
899
899
900 $ hg update -R ../update2bundled.hg -r 0
900 $ hg update -R ../update2bundled.hg -r 0
901 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
901 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
902 #endif
902 #endif
903
904 Test the option that create slim bundle
905
906 $ hg bundle -a --config devel.bundle.delta=p1 ./slim.hg
907 3 changesets found
General Comments 0
You need to be logged in to leave comments. Login now