##// END OF EJS Templates
changegroup: fix treemanifests on merges...
Martin von Zweigbergk -
r28240:1ac8ce13 default
parent child Browse files
Show More
@@ -1,1163 +1,1130 b''
1 # changegroup.py - Mercurial changegroup manipulation functions
1 # changegroup.py - Mercurial changegroup manipulation functions
2 #
2 #
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import struct
11 import struct
12 import tempfile
12 import tempfile
13 import weakref
13 import weakref
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullid,
18 nullid,
19 nullrev,
19 nullrev,
20 short,
20 short,
21 )
21 )
22
22
23 from . import (
23 from . import (
24 branchmap,
24 branchmap,
25 dagutil,
25 dagutil,
26 discovery,
26 discovery,
27 error,
27 error,
28 mdiff,
28 mdiff,
29 phases,
29 phases,
30 util,
30 util,
31 )
31 )
32
32
33 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
33 _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
34 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
34 _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
35 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
35 _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
36
36
37 def readexactly(stream, n):
37 def readexactly(stream, n):
38 '''read n bytes from stream.read and abort if less was available'''
38 '''read n bytes from stream.read and abort if less was available'''
39 s = stream.read(n)
39 s = stream.read(n)
40 if len(s) < n:
40 if len(s) < n:
41 raise error.Abort(_("stream ended unexpectedly"
41 raise error.Abort(_("stream ended unexpectedly"
42 " (got %d bytes, expected %d)")
42 " (got %d bytes, expected %d)")
43 % (len(s), n))
43 % (len(s), n))
44 return s
44 return s
45
45
46 def getchunk(stream):
46 def getchunk(stream):
47 """return the next chunk from stream as a string"""
47 """return the next chunk from stream as a string"""
48 d = readexactly(stream, 4)
48 d = readexactly(stream, 4)
49 l = struct.unpack(">l", d)[0]
49 l = struct.unpack(">l", d)[0]
50 if l <= 4:
50 if l <= 4:
51 if l:
51 if l:
52 raise error.Abort(_("invalid chunk length %d") % l)
52 raise error.Abort(_("invalid chunk length %d") % l)
53 return ""
53 return ""
54 return readexactly(stream, l - 4)
54 return readexactly(stream, l - 4)
55
55
56 def chunkheader(length):
56 def chunkheader(length):
57 """return a changegroup chunk header (string)"""
57 """return a changegroup chunk header (string)"""
58 return struct.pack(">l", length + 4)
58 return struct.pack(">l", length + 4)
59
59
60 def closechunk():
60 def closechunk():
61 """return a changegroup chunk header (string) for a zero-length chunk"""
61 """return a changegroup chunk header (string) for a zero-length chunk"""
62 return struct.pack(">l", 0)
62 return struct.pack(">l", 0)
63
63
64 def combineresults(results):
64 def combineresults(results):
65 """logic to combine 0 or more addchangegroup results into one"""
65 """logic to combine 0 or more addchangegroup results into one"""
66 changedheads = 0
66 changedheads = 0
67 result = 1
67 result = 1
68 for ret in results:
68 for ret in results:
69 # If any changegroup result is 0, return 0
69 # If any changegroup result is 0, return 0
70 if ret == 0:
70 if ret == 0:
71 result = 0
71 result = 0
72 break
72 break
73 if ret < -1:
73 if ret < -1:
74 changedheads += ret + 1
74 changedheads += ret + 1
75 elif ret > 1:
75 elif ret > 1:
76 changedheads += ret - 1
76 changedheads += ret - 1
77 if changedheads > 0:
77 if changedheads > 0:
78 result = 1 + changedheads
78 result = 1 + changedheads
79 elif changedheads < 0:
79 elif changedheads < 0:
80 result = -1 + changedheads
80 result = -1 + changedheads
81 return result
81 return result
82
82
83 bundletypes = {
83 bundletypes = {
84 "": ("", None), # only when using unbundle on ssh and old http servers
84 "": ("", None), # only when using unbundle on ssh and old http servers
85 # since the unification ssh accepts a header but there
85 # since the unification ssh accepts a header but there
86 # is no capability signaling it.
86 # is no capability signaling it.
87 "HG20": (), # special-cased below
87 "HG20": (), # special-cased below
88 "HG10UN": ("HG10UN", None),
88 "HG10UN": ("HG10UN", None),
89 "HG10BZ": ("HG10", 'BZ'),
89 "HG10BZ": ("HG10", 'BZ'),
90 "HG10GZ": ("HG10GZ", 'GZ'),
90 "HG10GZ": ("HG10GZ", 'GZ'),
91 }
91 }
92
92
93 # hgweb uses this list to communicate its preferred type
93 # hgweb uses this list to communicate its preferred type
94 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
94 bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
95
95
96 def writechunks(ui, chunks, filename, vfs=None):
96 def writechunks(ui, chunks, filename, vfs=None):
97 """Write chunks to a file and return its filename.
97 """Write chunks to a file and return its filename.
98
98
99 The stream is assumed to be a bundle file.
99 The stream is assumed to be a bundle file.
100 Existing files will not be overwritten.
100 Existing files will not be overwritten.
101 If no filename is specified, a temporary file is created.
101 If no filename is specified, a temporary file is created.
102 """
102 """
103 fh = None
103 fh = None
104 cleanup = None
104 cleanup = None
105 try:
105 try:
106 if filename:
106 if filename:
107 if vfs:
107 if vfs:
108 fh = vfs.open(filename, "wb")
108 fh = vfs.open(filename, "wb")
109 else:
109 else:
110 fh = open(filename, "wb")
110 fh = open(filename, "wb")
111 else:
111 else:
112 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
112 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
113 fh = os.fdopen(fd, "wb")
113 fh = os.fdopen(fd, "wb")
114 cleanup = filename
114 cleanup = filename
115 for c in chunks:
115 for c in chunks:
116 fh.write(c)
116 fh.write(c)
117 cleanup = None
117 cleanup = None
118 return filename
118 return filename
119 finally:
119 finally:
120 if fh is not None:
120 if fh is not None:
121 fh.close()
121 fh.close()
122 if cleanup is not None:
122 if cleanup is not None:
123 if filename and vfs:
123 if filename and vfs:
124 vfs.unlink(cleanup)
124 vfs.unlink(cleanup)
125 else:
125 else:
126 os.unlink(cleanup)
126 os.unlink(cleanup)
127
127
128 def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None):
128 def writebundle(ui, cg, filename, bundletype, vfs=None, compression=None):
129 """Write a bundle file and return its filename.
129 """Write a bundle file and return its filename.
130
130
131 Existing files will not be overwritten.
131 Existing files will not be overwritten.
132 If no filename is specified, a temporary file is created.
132 If no filename is specified, a temporary file is created.
133 bz2 compression can be turned off.
133 bz2 compression can be turned off.
134 The bundle file will be deleted in case of errors.
134 The bundle file will be deleted in case of errors.
135 """
135 """
136
136
137 if bundletype == "HG20":
137 if bundletype == "HG20":
138 from . import bundle2
138 from . import bundle2
139 bundle = bundle2.bundle20(ui)
139 bundle = bundle2.bundle20(ui)
140 bundle.setcompression(compression)
140 bundle.setcompression(compression)
141 part = bundle.newpart('changegroup', data=cg.getchunks())
141 part = bundle.newpart('changegroup', data=cg.getchunks())
142 part.addparam('version', cg.version)
142 part.addparam('version', cg.version)
143 chunkiter = bundle.getchunks()
143 chunkiter = bundle.getchunks()
144 else:
144 else:
145 # compression argument is only for the bundle2 case
145 # compression argument is only for the bundle2 case
146 assert compression is None
146 assert compression is None
147 if cg.version != '01':
147 if cg.version != '01':
148 raise error.Abort(_('old bundle types only supports v1 '
148 raise error.Abort(_('old bundle types only supports v1 '
149 'changegroups'))
149 'changegroups'))
150 header, comp = bundletypes[bundletype]
150 header, comp = bundletypes[bundletype]
151 if comp not in util.compressors:
151 if comp not in util.compressors:
152 raise error.Abort(_('unknown stream compression type: %s')
152 raise error.Abort(_('unknown stream compression type: %s')
153 % comp)
153 % comp)
154 z = util.compressors[comp]()
154 z = util.compressors[comp]()
155 subchunkiter = cg.getchunks()
155 subchunkiter = cg.getchunks()
156 def chunkiter():
156 def chunkiter():
157 yield header
157 yield header
158 for chunk in subchunkiter:
158 for chunk in subchunkiter:
159 yield z.compress(chunk)
159 yield z.compress(chunk)
160 yield z.flush()
160 yield z.flush()
161 chunkiter = chunkiter()
161 chunkiter = chunkiter()
162
162
163 # parse the changegroup data, otherwise we will block
163 # parse the changegroup data, otherwise we will block
164 # in case of sshrepo because we don't know the end of the stream
164 # in case of sshrepo because we don't know the end of the stream
165
165
166 # an empty chunkgroup is the end of the changegroup
166 # an empty chunkgroup is the end of the changegroup
167 # a changegroup has at least 2 chunkgroups (changelog and manifest).
167 # a changegroup has at least 2 chunkgroups (changelog and manifest).
168 # after that, an empty chunkgroup is the end of the changegroup
168 # after that, an empty chunkgroup is the end of the changegroup
169 return writechunks(ui, chunkiter, filename, vfs=vfs)
169 return writechunks(ui, chunkiter, filename, vfs=vfs)
170
170
171 class cg1unpacker(object):
171 class cg1unpacker(object):
172 """Unpacker for cg1 changegroup streams.
172 """Unpacker for cg1 changegroup streams.
173
173
174 A changegroup unpacker handles the framing of the revision data in
174 A changegroup unpacker handles the framing of the revision data in
175 the wire format. Most consumers will want to use the apply()
175 the wire format. Most consumers will want to use the apply()
176 method to add the changes from the changegroup to a repository.
176 method to add the changes from the changegroup to a repository.
177
177
178 If you're forwarding a changegroup unmodified to another consumer,
178 If you're forwarding a changegroup unmodified to another consumer,
179 use getchunks(), which returns an iterator of changegroup
179 use getchunks(), which returns an iterator of changegroup
180 chunks. This is mostly useful for cases where you need to know the
180 chunks. This is mostly useful for cases where you need to know the
181 data stream has ended by observing the end of the changegroup.
181 data stream has ended by observing the end of the changegroup.
182
182
183 deltachunk() is useful only if you're applying delta data. Most
183 deltachunk() is useful only if you're applying delta data. Most
184 consumers should prefer apply() instead.
184 consumers should prefer apply() instead.
185
185
186 A few other public methods exist. Those are used only for
186 A few other public methods exist. Those are used only for
187 bundlerepo and some debug commands - their use is discouraged.
187 bundlerepo and some debug commands - their use is discouraged.
188 """
188 """
189 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
189 deltaheader = _CHANGEGROUPV1_DELTA_HEADER
190 deltaheadersize = struct.calcsize(deltaheader)
190 deltaheadersize = struct.calcsize(deltaheader)
191 version = '01'
191 version = '01'
192 _grouplistcount = 1 # One list of files after the manifests
192 _grouplistcount = 1 # One list of files after the manifests
193
193
194 def __init__(self, fh, alg):
194 def __init__(self, fh, alg):
195 if alg == 'UN':
195 if alg == 'UN':
196 alg = None # get more modern without breaking too much
196 alg = None # get more modern without breaking too much
197 if not alg in util.decompressors:
197 if not alg in util.decompressors:
198 raise error.Abort(_('unknown stream compression type: %s')
198 raise error.Abort(_('unknown stream compression type: %s')
199 % alg)
199 % alg)
200 if alg == 'BZ':
200 if alg == 'BZ':
201 alg = '_truncatedBZ'
201 alg = '_truncatedBZ'
202 self._stream = util.decompressors[alg](fh)
202 self._stream = util.decompressors[alg](fh)
203 self._type = alg
203 self._type = alg
204 self.callback = None
204 self.callback = None
205
205
206 # These methods (compressed, read, seek, tell) all appear to only
206 # These methods (compressed, read, seek, tell) all appear to only
207 # be used by bundlerepo, but it's a little hard to tell.
207 # be used by bundlerepo, but it's a little hard to tell.
208 def compressed(self):
208 def compressed(self):
209 return self._type is not None
209 return self._type is not None
210 def read(self, l):
210 def read(self, l):
211 return self._stream.read(l)
211 return self._stream.read(l)
212 def seek(self, pos):
212 def seek(self, pos):
213 return self._stream.seek(pos)
213 return self._stream.seek(pos)
214 def tell(self):
214 def tell(self):
215 return self._stream.tell()
215 return self._stream.tell()
216 def close(self):
216 def close(self):
217 return self._stream.close()
217 return self._stream.close()
218
218
219 def _chunklength(self):
219 def _chunklength(self):
220 d = readexactly(self._stream, 4)
220 d = readexactly(self._stream, 4)
221 l = struct.unpack(">l", d)[0]
221 l = struct.unpack(">l", d)[0]
222 if l <= 4:
222 if l <= 4:
223 if l:
223 if l:
224 raise error.Abort(_("invalid chunk length %d") % l)
224 raise error.Abort(_("invalid chunk length %d") % l)
225 return 0
225 return 0
226 if self.callback:
226 if self.callback:
227 self.callback()
227 self.callback()
228 return l - 4
228 return l - 4
229
229
230 def changelogheader(self):
230 def changelogheader(self):
231 """v10 does not have a changelog header chunk"""
231 """v10 does not have a changelog header chunk"""
232 return {}
232 return {}
233
233
234 def manifestheader(self):
234 def manifestheader(self):
235 """v10 does not have a manifest header chunk"""
235 """v10 does not have a manifest header chunk"""
236 return {}
236 return {}
237
237
238 def filelogheader(self):
238 def filelogheader(self):
239 """return the header of the filelogs chunk, v10 only has the filename"""
239 """return the header of the filelogs chunk, v10 only has the filename"""
240 l = self._chunklength()
240 l = self._chunklength()
241 if not l:
241 if not l:
242 return {}
242 return {}
243 fname = readexactly(self._stream, l)
243 fname = readexactly(self._stream, l)
244 return {'filename': fname}
244 return {'filename': fname}
245
245
246 def _deltaheader(self, headertuple, prevnode):
246 def _deltaheader(self, headertuple, prevnode):
247 node, p1, p2, cs = headertuple
247 node, p1, p2, cs = headertuple
248 if prevnode is None:
248 if prevnode is None:
249 deltabase = p1
249 deltabase = p1
250 else:
250 else:
251 deltabase = prevnode
251 deltabase = prevnode
252 flags = 0
252 flags = 0
253 return node, p1, p2, deltabase, cs, flags
253 return node, p1, p2, deltabase, cs, flags
254
254
255 def deltachunk(self, prevnode):
255 def deltachunk(self, prevnode):
256 l = self._chunklength()
256 l = self._chunklength()
257 if not l:
257 if not l:
258 return {}
258 return {}
259 headerdata = readexactly(self._stream, self.deltaheadersize)
259 headerdata = readexactly(self._stream, self.deltaheadersize)
260 header = struct.unpack(self.deltaheader, headerdata)
260 header = struct.unpack(self.deltaheader, headerdata)
261 delta = readexactly(self._stream, l - self.deltaheadersize)
261 delta = readexactly(self._stream, l - self.deltaheadersize)
262 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
262 node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
263 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
263 return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
264 'deltabase': deltabase, 'delta': delta, 'flags': flags}
264 'deltabase': deltabase, 'delta': delta, 'flags': flags}
265
265
266 def getchunks(self):
266 def getchunks(self):
267 """returns all the chunks contains in the bundle
267 """returns all the chunks contains in the bundle
268
268
269 Used when you need to forward the binary stream to a file or another
269 Used when you need to forward the binary stream to a file or another
270 network API. To do so, it parse the changegroup data, otherwise it will
270 network API. To do so, it parse the changegroup data, otherwise it will
271 block in case of sshrepo because it don't know the end of the stream.
271 block in case of sshrepo because it don't know the end of the stream.
272 """
272 """
273 # an empty chunkgroup is the end of the changegroup
273 # an empty chunkgroup is the end of the changegroup
274 # a changegroup has at least 2 chunkgroups (changelog and manifest).
274 # a changegroup has at least 2 chunkgroups (changelog and manifest).
275 # after that, changegroup versions 1 and 2 have a series of groups
275 # after that, changegroup versions 1 and 2 have a series of groups
276 # with one group per file. changegroup 3 has a series of directory
276 # with one group per file. changegroup 3 has a series of directory
277 # manifests before the files.
277 # manifests before the files.
278 count = 0
278 count = 0
279 emptycount = 0
279 emptycount = 0
280 while emptycount < self._grouplistcount:
280 while emptycount < self._grouplistcount:
281 empty = True
281 empty = True
282 count += 1
282 count += 1
283 while True:
283 while True:
284 chunk = getchunk(self)
284 chunk = getchunk(self)
285 if not chunk:
285 if not chunk:
286 if empty and count > 2:
286 if empty and count > 2:
287 emptycount += 1
287 emptycount += 1
288 break
288 break
289 empty = False
289 empty = False
290 yield chunkheader(len(chunk))
290 yield chunkheader(len(chunk))
291 pos = 0
291 pos = 0
292 while pos < len(chunk):
292 while pos < len(chunk):
293 next = pos + 2**20
293 next = pos + 2**20
294 yield chunk[pos:next]
294 yield chunk[pos:next]
295 pos = next
295 pos = next
296 yield closechunk()
296 yield closechunk()
297
297
298 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
298 def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
299 # We know that we'll never have more manifests than we had
299 # We know that we'll never have more manifests than we had
300 # changesets.
300 # changesets.
301 self.callback = prog(_('manifests'), numchanges)
301 self.callback = prog(_('manifests'), numchanges)
302 # no need to check for empty manifest group here:
302 # no need to check for empty manifest group here:
303 # if the result of the merge of 1 and 2 is the same in 3 and 4,
303 # if the result of the merge of 1 and 2 is the same in 3 and 4,
304 # no new manifest will be created and the manifest group will
304 # no new manifest will be created and the manifest group will
305 # be empty during the pull
305 # be empty during the pull
306 self.manifestheader()
306 self.manifestheader()
307 repo.manifest.addgroup(self, revmap, trp)
307 repo.manifest.addgroup(self, revmap, trp)
308 repo.ui.progress(_('manifests'), None)
308 repo.ui.progress(_('manifests'), None)
309
309
310 def apply(self, repo, srctype, url, emptyok=False,
310 def apply(self, repo, srctype, url, emptyok=False,
311 targetphase=phases.draft, expectedtotal=None):
311 targetphase=phases.draft, expectedtotal=None):
312 """Add the changegroup returned by source.read() to this repo.
312 """Add the changegroup returned by source.read() to this repo.
313 srctype is a string like 'push', 'pull', or 'unbundle'. url is
313 srctype is a string like 'push', 'pull', or 'unbundle'. url is
314 the URL of the repo where this changegroup is coming from.
314 the URL of the repo where this changegroup is coming from.
315
315
316 Return an integer summarizing the change to this repo:
316 Return an integer summarizing the change to this repo:
317 - nothing changed or no source: 0
317 - nothing changed or no source: 0
318 - more heads than before: 1+added heads (2..n)
318 - more heads than before: 1+added heads (2..n)
319 - fewer heads than before: -1-removed heads (-2..-n)
319 - fewer heads than before: -1-removed heads (-2..-n)
320 - number of heads stays the same: 1
320 - number of heads stays the same: 1
321 """
321 """
322 repo = repo.unfiltered()
322 repo = repo.unfiltered()
323 def csmap(x):
323 def csmap(x):
324 repo.ui.debug("add changeset %s\n" % short(x))
324 repo.ui.debug("add changeset %s\n" % short(x))
325 return len(cl)
325 return len(cl)
326
326
327 def revmap(x):
327 def revmap(x):
328 return cl.rev(x)
328 return cl.rev(x)
329
329
330 changesets = files = revisions = 0
330 changesets = files = revisions = 0
331
331
332 try:
332 try:
333 with repo.transaction("\n".join([srctype,
333 with repo.transaction("\n".join([srctype,
334 util.hidepassword(url)])) as tr:
334 util.hidepassword(url)])) as tr:
335 # The transaction could have been created before and already
335 # The transaction could have been created before and already
336 # carries source information. In this case we use the top
336 # carries source information. In this case we use the top
337 # level data. We overwrite the argument because we need to use
337 # level data. We overwrite the argument because we need to use
338 # the top level value (if they exist) in this function.
338 # the top level value (if they exist) in this function.
339 srctype = tr.hookargs.setdefault('source', srctype)
339 srctype = tr.hookargs.setdefault('source', srctype)
340 url = tr.hookargs.setdefault('url', url)
340 url = tr.hookargs.setdefault('url', url)
341 repo.hook('prechangegroup', throw=True, **tr.hookargs)
341 repo.hook('prechangegroup', throw=True, **tr.hookargs)
342
342
343 # write changelog data to temp files so concurrent readers
343 # write changelog data to temp files so concurrent readers
344 # will not see an inconsistent view
344 # will not see an inconsistent view
345 cl = repo.changelog
345 cl = repo.changelog
346 cl.delayupdate(tr)
346 cl.delayupdate(tr)
347 oldheads = cl.heads()
347 oldheads = cl.heads()
348
348
349 trp = weakref.proxy(tr)
349 trp = weakref.proxy(tr)
350 # pull off the changeset group
350 # pull off the changeset group
351 repo.ui.status(_("adding changesets\n"))
351 repo.ui.status(_("adding changesets\n"))
352 clstart = len(cl)
352 clstart = len(cl)
353 class prog(object):
353 class prog(object):
354 def __init__(self, step, total):
354 def __init__(self, step, total):
355 self._step = step
355 self._step = step
356 self._total = total
356 self._total = total
357 self._count = 1
357 self._count = 1
358 def __call__(self):
358 def __call__(self):
359 repo.ui.progress(self._step, self._count,
359 repo.ui.progress(self._step, self._count,
360 unit=_('chunks'), total=self._total)
360 unit=_('chunks'), total=self._total)
361 self._count += 1
361 self._count += 1
362 self.callback = prog(_('changesets'), expectedtotal)
362 self.callback = prog(_('changesets'), expectedtotal)
363
363
364 efiles = set()
364 efiles = set()
365 def onchangelog(cl, node):
365 def onchangelog(cl, node):
366 efiles.update(cl.read(node)[3])
366 efiles.update(cl.read(node)[3])
367
367
368 self.changelogheader()
368 self.changelogheader()
369 srccontent = cl.addgroup(self, csmap, trp,
369 srccontent = cl.addgroup(self, csmap, trp,
370 addrevisioncb=onchangelog)
370 addrevisioncb=onchangelog)
371 efiles = len(efiles)
371 efiles = len(efiles)
372
372
373 if not (srccontent or emptyok):
373 if not (srccontent or emptyok):
374 raise error.Abort(_("received changelog group is empty"))
374 raise error.Abort(_("received changelog group is empty"))
375 clend = len(cl)
375 clend = len(cl)
376 changesets = clend - clstart
376 changesets = clend - clstart
377 repo.ui.progress(_('changesets'), None)
377 repo.ui.progress(_('changesets'), None)
378
378
379 # pull off the manifest group
379 # pull off the manifest group
380 repo.ui.status(_("adding manifests\n"))
380 repo.ui.status(_("adding manifests\n"))
381 self._unpackmanifests(repo, revmap, trp, prog, changesets)
381 self._unpackmanifests(repo, revmap, trp, prog, changesets)
382
382
383 needfiles = {}
383 needfiles = {}
384 if repo.ui.configbool('server', 'validate', default=False):
384 if repo.ui.configbool('server', 'validate', default=False):
385 # validate incoming csets have their manifests
385 # validate incoming csets have their manifests
386 for cset in xrange(clstart, clend):
386 for cset in xrange(clstart, clend):
387 mfnode = repo.changelog.read(
387 mfnode = repo.changelog.read(
388 repo.changelog.node(cset))[0]
388 repo.changelog.node(cset))[0]
389 mfest = repo.manifest.readdelta(mfnode)
389 mfest = repo.manifest.readdelta(mfnode)
390 # store file nodes we must see
390 # store file nodes we must see
391 for f, n in mfest.iteritems():
391 for f, n in mfest.iteritems():
392 needfiles.setdefault(f, set()).add(n)
392 needfiles.setdefault(f, set()).add(n)
393
393
394 # process the files
394 # process the files
395 repo.ui.status(_("adding file changes\n"))
395 repo.ui.status(_("adding file changes\n"))
396 self.callback = None
396 self.callback = None
397 pr = prog(_('files'), efiles)
397 pr = prog(_('files'), efiles)
398 newrevs, newfiles = _addchangegroupfiles(
398 newrevs, newfiles = _addchangegroupfiles(
399 repo, self, revmap, trp, pr, needfiles)
399 repo, self, revmap, trp, pr, needfiles)
400 revisions += newrevs
400 revisions += newrevs
401 files += newfiles
401 files += newfiles
402
402
403 dh = 0
403 dh = 0
404 if oldheads:
404 if oldheads:
405 heads = cl.heads()
405 heads = cl.heads()
406 dh = len(heads) - len(oldheads)
406 dh = len(heads) - len(oldheads)
407 for h in heads:
407 for h in heads:
408 if h not in oldheads and repo[h].closesbranch():
408 if h not in oldheads and repo[h].closesbranch():
409 dh -= 1
409 dh -= 1
410 htext = ""
410 htext = ""
411 if dh:
411 if dh:
412 htext = _(" (%+d heads)") % dh
412 htext = _(" (%+d heads)") % dh
413
413
414 repo.ui.status(_("added %d changesets"
414 repo.ui.status(_("added %d changesets"
415 " with %d changes to %d files%s\n")
415 " with %d changes to %d files%s\n")
416 % (changesets, revisions, files, htext))
416 % (changesets, revisions, files, htext))
417 repo.invalidatevolatilesets()
417 repo.invalidatevolatilesets()
418
418
419 if changesets > 0:
419 if changesets > 0:
420 if 'node' not in tr.hookargs:
420 if 'node' not in tr.hookargs:
421 tr.hookargs['node'] = hex(cl.node(clstart))
421 tr.hookargs['node'] = hex(cl.node(clstart))
422 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
422 tr.hookargs['node_last'] = hex(cl.node(clend - 1))
423 hookargs = dict(tr.hookargs)
423 hookargs = dict(tr.hookargs)
424 else:
424 else:
425 hookargs = dict(tr.hookargs)
425 hookargs = dict(tr.hookargs)
426 hookargs['node'] = hex(cl.node(clstart))
426 hookargs['node'] = hex(cl.node(clstart))
427 hookargs['node_last'] = hex(cl.node(clend - 1))
427 hookargs['node_last'] = hex(cl.node(clend - 1))
428 repo.hook('pretxnchangegroup', throw=True, **hookargs)
428 repo.hook('pretxnchangegroup', throw=True, **hookargs)
429
429
430 added = [cl.node(r) for r in xrange(clstart, clend)]
430 added = [cl.node(r) for r in xrange(clstart, clend)]
431 publishing = repo.publishing()
431 publishing = repo.publishing()
432 if srctype in ('push', 'serve'):
432 if srctype in ('push', 'serve'):
433 # Old servers can not push the boundary themselves.
433 # Old servers can not push the boundary themselves.
434 # New servers won't push the boundary if changeset already
434 # New servers won't push the boundary if changeset already
435 # exists locally as secret
435 # exists locally as secret
436 #
436 #
437 # We should not use added here but the list of all change in
437 # We should not use added here but the list of all change in
438 # the bundle
438 # the bundle
439 if publishing:
439 if publishing:
440 phases.advanceboundary(repo, tr, phases.public,
440 phases.advanceboundary(repo, tr, phases.public,
441 srccontent)
441 srccontent)
442 else:
442 else:
443 # Those changesets have been pushed from the
443 # Those changesets have been pushed from the
444 # outside, their phases are going to be pushed
444 # outside, their phases are going to be pushed
445 # alongside. Therefor `targetphase` is
445 # alongside. Therefor `targetphase` is
446 # ignored.
446 # ignored.
447 phases.advanceboundary(repo, tr, phases.draft,
447 phases.advanceboundary(repo, tr, phases.draft,
448 srccontent)
448 srccontent)
449 phases.retractboundary(repo, tr, phases.draft, added)
449 phases.retractboundary(repo, tr, phases.draft, added)
450 elif srctype != 'strip':
450 elif srctype != 'strip':
451 # publishing only alter behavior during push
451 # publishing only alter behavior during push
452 #
452 #
453 # strip should not touch boundary at all
453 # strip should not touch boundary at all
454 phases.retractboundary(repo, tr, targetphase, added)
454 phases.retractboundary(repo, tr, targetphase, added)
455
455
456 if changesets > 0:
456 if changesets > 0:
457 if srctype != 'strip':
457 if srctype != 'strip':
458 # During strip, branchcache is invalid but
458 # During strip, branchcache is invalid but
459 # coming call to `destroyed` will repair it.
459 # coming call to `destroyed` will repair it.
460 # In other case we can safely update cache on
460 # In other case we can safely update cache on
461 # disk.
461 # disk.
462 branchmap.updatecache(repo.filtered('served'))
462 branchmap.updatecache(repo.filtered('served'))
463
463
464 def runhooks():
464 def runhooks():
465 # These hooks run when the lock releases, not when the
465 # These hooks run when the lock releases, not when the
466 # transaction closes. So it's possible for the changelog
466 # transaction closes. So it's possible for the changelog
467 # to have changed since we last saw it.
467 # to have changed since we last saw it.
468 if clstart >= len(repo):
468 if clstart >= len(repo):
469 return
469 return
470
470
471 # forcefully update the on-disk branch cache
471 # forcefully update the on-disk branch cache
472 repo.ui.debug("updating the branch cache\n")
472 repo.ui.debug("updating the branch cache\n")
473 repo.hook("changegroup", **hookargs)
473 repo.hook("changegroup", **hookargs)
474
474
475 for n in added:
475 for n in added:
476 args = hookargs.copy()
476 args = hookargs.copy()
477 args['node'] = hex(n)
477 args['node'] = hex(n)
478 del args['node_last']
478 del args['node_last']
479 repo.hook("incoming", **args)
479 repo.hook("incoming", **args)
480
480
481 newheads = [h for h in repo.heads()
481 newheads = [h for h in repo.heads()
482 if h not in oldheads]
482 if h not in oldheads]
483 repo.ui.log("incoming",
483 repo.ui.log("incoming",
484 "%s incoming changes - new heads: %s\n",
484 "%s incoming changes - new heads: %s\n",
485 len(added),
485 len(added),
486 ', '.join([hex(c[:6]) for c in newheads]))
486 ', '.join([hex(c[:6]) for c in newheads]))
487
487
488 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
488 tr.addpostclose('changegroup-runhooks-%020i' % clstart,
489 lambda tr: repo._afterlock(runhooks))
489 lambda tr: repo._afterlock(runhooks))
490 finally:
490 finally:
491 repo.ui.flush()
491 repo.ui.flush()
492 # never return 0 here:
492 # never return 0 here:
493 if dh < 0:
493 if dh < 0:
494 return dh - 1
494 return dh - 1
495 else:
495 else:
496 return dh + 1
496 return dh + 1
497
497
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        # cg2 headers carry an explicit delta base but no flags field;
        # flags are implicitly 0 for this stream version.
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags
513
513
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        # cg3 headers carry the revlog flags explicitly.
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
        # Apply the root manifests first, then consume the trailing
        # per-directory groups that cg3 appends for treemanifests.
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
                                                  numchanges)
        while True:
            chunkdata = self.filelogheader()
            if not chunkdata:
                break
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifest.dirlog(d)
            if not dirlog.addgroup(self, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))
543
543
class headerlessfixup(object):
    """File-like wrapper that re-prepends an already-consumed header.

    Reads are served from the buffered header bytes first; once the
    header is exhausted, reads fall through to the underlying stream.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            # Serve (part of) the read from the buffered header.
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)
555
555
556 def _moddirs(files):
557 """Given a set of modified files, find the list of modified directories.
558
559 This returns a list of (path to changed dir, changed dir) tuples,
560 as that's what the one client needs anyway.
561
562 >>> _moddirs(['a/b/c.py', 'a/b/c.txt', 'a/d/e/f/g.txt', 'i.txt', ])
563 [('/', 'a/'), ('a/', 'b/'), ('a/', 'd/'), ('a/d/', 'e/'), ('a/d/e/', 'f/')]
564
565 """
566 alldirs = set()
567 for f in files:
568 path = f.split('/')[:-1]
569 for i in xrange(len(path) - 1, -1, -1):
570 dn = '/'.join(path[:i])
571 current = dn + '/', path[i] + '/'
572 if current in alldirs:
573 break
574 alldirs.add(current)
575 return sorted(alldirs)
576
class cg1packer(object):
    """Changegroup packer producing version '01' streams."""
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if units is not None:
            self._progress(msgbundling, None)
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifest,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg1/cg2 have no terminator chunk after the manifest group.
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        # maps manifest node id -> set(changed files)
        mfchangedfiles = {}

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            mfchangedfiles.setdefault(n, set()).update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                fastpathlinkrev, mfs, mfchangedfiles, fnodes):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        changedfiles = set()
        for x in mfchangedfiles.itervalues():
            changedfiles.update(x)
        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          mfchangedfiles, fnodes):
        repo = self._repo
        dirlog = repo.manifest.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = tmfnodes[dir][x]
                mdata = dirlog(dir).readshallowfast(x)
                if 'treemanifest' in repo.requirements:
                    # Walk this (sub)manifest's entries, queuing child
                    # directory manifests and recording file linknodes.
                    for p, n, fl in mdata.iterentries():
                        if fl == 't': # subdirectory manifest
                            subdir = dir + p + '/'
                            tmfclnodes = tmfnodes.setdefault(subdir, {})
                            tmfclnode = tmfclnodes.setdefault(n, clnode)
                            if clrevorder[clnode] < clrevorder[tmfclnode]:
                                tmfclnodes[n] = clnode
                        else:
                            f = dir + p
                            fclnodes = fnodes.setdefault(f, {})
                            fclnode = fclnodes.setdefault(n, clnode)
                            if clrevorder[clnode] < clrevorder[fclnode]:
                                fclnodes[n] = clnode
                else:
                    for f in mfchangedfiles[x]:
                        try:
                            n = mdata[f]
                        except KeyError:
                            continue
                        # record the first changeset introducing this filelog
                        # version
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir = min(tmfnodes)
            nodes = tmfnodes[dir]
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            for x in self._packmanifests(dir, prunednodes,
                                         makelookupmflinknode(dir)):
                size += len(x)
                yield x
            del tmfnodes[dir]
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = self._progress
        msgbundling = _('bundling')

        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing revlog for %s") % fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress(msgbundling, None)

    def deltaparent(self, revlog, rev, p1, p2, prev):
        # cg1 always deltas against the previous revision in the stream.
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # do nothing with basenode, it is implicitly the previous one in HG10
        # do nothing with flags, it is implicitly 0 for cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
911
878
class cg2packer(cg1packer):
    """Changegroup packer producing version '02' (generaldelta) streams."""
    version = '02'
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER

    def __init__(self, repo, bundlecaps=None):
        super(cg2packer, self).__init__(repo, bundlecaps)
        if self._reorder is None:
            # Since generaldelta is directly supported by cg2, reordering
            # generally doesn't help, so we disable it by default (treating
            # bundle.reorder=auto just like bundle.reorder=False).
            self._reorder = False

    def deltaparent(self, revlog, rev, p1, p2, prev):
        dp = revlog.deltaparent(rev)
        # avoid storing full revisions; pick prev in those cases
        # also pick prev when we can't be sure remote has dp
        if dp == nullrev or (dp != p1 and dp != p2 and dp != prev):
            return prev
        return dp

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        # Do nothing with flags, it is implicitly 0 in cg1 and cg2
        return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
935
902
class cg3packer(cg2packer):
    """Changegroup packer producing version '03' streams.

    Adds revlog flags to the delta header and supports sending
    per-directory manifest groups for treemanifest repos.
    """
    version = '03'
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        # Non-root directories are announced with a filename-style header
        # carrying the directory path.
        if dir:
            yield self.fileheader(dir)
        for chunk in self.group(mfnodes, self._repo.manifest.dirlog(dir),
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        # cg3 terminates the manifest section with an empty chunk.
        return self.close()

    def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
        return struct.pack(
            self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
953
920
# Maps changegroup version -> (packer class, unpacker class).
_packermap = {'01': (cg1packer, cg1unpacker),
              # cg2 adds support for exchanging generaldelta
              '02': (cg2packer, cg2unpacker),
              # cg3 adds support for exchanging revlog flags and treemanifests
              '03': (cg3packer, cg3unpacker),
             }
960
927
def allsupportedversions(ui):
    """Return the set of changegroup versions enabled by configuration.

    '03' is only included when explicitly enabled via the experimental
    changegroup3/treemanifest config knobs.
    """
    versions = set(_packermap.keys())
    versions.discard('03')
    if (ui.configbool('experimental', 'changegroup3') or
        ui.configbool('experimental', 'treemanifest')):
        versions.add('03')
    return versions
968
935
# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    """Return the changegroup versions this repo can receive."""
    versions = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        # A treemanifest repo must always accept cg3 streams.
        versions.add('03')
    return versions
975
942
# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    """Return the changegroup versions this repo can produce."""
    versions = allsupportedversions(repo.ui)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
        versions.add('03')
    return versions
989
956
def safeversion(repo):
    """Return the smallest changegroup version that it is safe to assume
    all clients of this repo will support.

    For example, all hg versions that support generaldelta also support
    changegroup 02, so '01' can be dropped for generaldelta repos.
    """
    candidates = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        candidates.discard('01')
    assert candidates
    return min(candidates)
999
966
def getbundler(version, repo, bundlecaps=None):
    """Instantiate the packer registered for *version*."""
    assert version in supportedoutgoingversions(repo)
    packercls = _packermap[version][0]
    return packercls(repo, bundlecaps)
1003
970
def getunbundler(version, fh, alg):
    """Instantiate the unpacker registered for *version*."""
    unpackercls = _packermap[version][1]
    return unpackercls(fh, alg)
1006
973
def _changegroupinfo(repo, nodes, source):
    """Report how many (and, when debugging, which) changesets are sent."""
    ui = repo.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for node in nodes:
            ui.debug("%s\n" % hex(node))
1014
981
def getsubsetraw(repo, outgoing, bundler, source, fastpath=False):
    """Generate raw changegroup chunks for the given outgoing set.

    Fires the 'preoutgoing' hook (which may abort) before generation
    starts, and reports progress via _changegroupinfo().
    """
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know that all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
1030
997
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Wrap getsubsetraw()'s raw chunk stream in an unbundler object."""
    chunks = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return getunbundler(bundler.version, util.chunkbuffer(chunks), None)
1034
1001
def changegroupsubset(repo, roots, heads, source, version='01'):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        roots = [nullid]
    discbases = []
    for n in roots:
        # the parents of the roots form the "common" side of the discovery
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    included = set(csets)
    # a base that is itself being transferred must not be listed as common
    discbases = [n for n in discbases if n not in included]
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = getbundler(version, repo)
    return getsubset(repo, outgoing, bundler, source)
1061
1028
def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None,
                           version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing. Returns a raw changegroup generator."""
    if not outgoing.missing:
        # nothing to transfer
        return None
    packer = getbundler(version, repo, bundlecaps)
    return getsubsetraw(repo, outgoing, packer, source)
1072
1039
def getlocalchangegroup(repo, source, outgoing, bundlecaps=None,
                        version='01'):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        # nothing to transfer
        return None
    packer = getbundler(version, repo, bundlecaps)
    return getsubset(repo, outgoing, packer, source)
1083
1050
def computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        # drop nodes the local changelog does not know about
        common = [n for n in common if cl.hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)
1102
1069
def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None,
                   version='01'):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    outgoing = computeoutgoing(repo, heads, common)
    return getlocalchangegroup(repo, source, outgoing,
                               bundlecaps=bundlecaps, version=version)
1116
1083
def changegroup(repo, basenodes, source):
    """Return a changegroup covering everything between basenodes and the
    current repository heads."""
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)
1120
1087
def _addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    """Apply the file-revlog portion of a changegroup stream.

    source: the changegroup unbundler being consumed
    revmap: lookup used by addgroup to resolve linkrevs
    trp: transaction proxy used for revlog writes
    pr: progress callback, invoked once per file
    needfiles: dict of filename -> set of filenodes that already-applied
        changesets require; entries are checked off as revisions arrive

    Returns a (revisions, files) tuple with totals of what was added.
    """
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            # an empty header terminates the per-file groups
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        try:
            if not fl.addgroup(source, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            # check off each node just added; a node nobody asked for means
            # the stream is corrupt or malicious
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)

    # anything still in needfiles never arrived; the repo would be left
    # inconsistent, so verify and abort with a clear message
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
@@ -1,1078 +1,1094 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import array
10 import array
11 import heapq
11 import heapq
12 import os
12 import os
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from . import (
16 from . import (
17 error,
17 error,
18 mdiff,
18 mdiff,
19 parsers,
19 parsers,
20 revlog,
20 revlog,
21 util,
21 util,
22 )
22 )
23
23
# Shorthand for util.propertycache, used by the classes below for
# lazily-computed attributes.
propertycache = util.propertycache
25
25
def _parsev1(data):
    """Parse a v1 manifest text, yielding (path, node, flags) tuples."""
    # This method does a little bit of excessive-looking
    # precondition checking. This is so that the behavior of this
    # class exactly matches its C counterpart to try and help
    # prevent surprise breakage for anyone that develops against
    # the pure version.
    if data and data[-1] != '\n':
        raise ValueError('Manifest did not end in a newline.')
    prev = None
    for line in data.splitlines():
        if prev is not None and prev > line:
            raise ValueError('Manifest lines not in sorted order.')
        prev = line
        path, hexnode = line.split('\0')
        if len(hexnode) > 40:
            # anything after the 40 hex digits is the flags field
            yield path, revlog.bin(hexnode[:40]), hexnode[40:]
        else:
            yield path, revlog.bin(hexnode), ''
44
44
45 def _parsev2(data):
45 def _parsev2(data):
46 metadataend = data.find('\n')
46 metadataend = data.find('\n')
47 # Just ignore metadata for now
47 # Just ignore metadata for now
48 pos = metadataend + 1
48 pos = metadataend + 1
49 prevf = ''
49 prevf = ''
50 while pos < len(data):
50 while pos < len(data):
51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
52 if end == -1:
52 if end == -1:
53 raise ValueError('Manifest ended with incomplete file entry.')
53 raise ValueError('Manifest ended with incomplete file entry.')
54 stemlen = ord(data[pos])
54 stemlen = ord(data[pos])
55 items = data[pos + 1:end].split('\0')
55 items = data[pos + 1:end].split('\0')
56 f = prevf[:stemlen] + items[0]
56 f = prevf[:stemlen] + items[0]
57 if prevf > f:
57 if prevf > f:
58 raise ValueError('Manifest entries not in sorted order.')
58 raise ValueError('Manifest entries not in sorted order.')
59 fl = items[1]
59 fl = items[1]
60 # Just ignore metadata (items[2:] for now)
60 # Just ignore metadata (items[2:] for now)
61 n = data[end + 1:end + 21]
61 n = data[end + 1:end + 21]
62 yield f, n, fl
62 yield f, n, fl
63 pos = end + 22
63 pos = end + 22
64 prevf = f
64 prevf = f
65
65
def _parse(data):
    """Generates (path, node, flags) tuples from a manifest text"""
    # a leading NUL byte marks the v2 format
    parser = _parsev2 if data.startswith('\0') else _parsev1
    return iter(parser(data))
72
72
def _text(it, usemanifestv2):
    """Given an iterator over (path, node, flags) tuples, returns a manifest
    text"""
    return _textv2(it) if usemanifestv2 else _textv1(it)
80
80
def _textv1(it):
    """Render (path, node, flags) tuples as a v1 manifest text."""
    paths = []
    lines = []
    hexfn = revlog.hex
    for path, node, flags in it:
        paths.append(path)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        lines.append("%s\0%s%s\n" % (path, hexfn(node), flags))

    _checkforbidden(paths)
    return ''.join(lines)
93
93
def _textv2(it):
    """Render (path, node, flags) tuples as a v2 manifest text."""
    paths = []
    # the leading NUL marks the v2 format; the metadata block is empty
    chunks = ['\0\n']
    prevpath = ''
    for path, node, flags in it:
        paths.append(path)
        # each entry is prefix-compressed against the previous path
        stem = os.path.commonprefix([prevpath, path])
        stemlen = min(len(stem), 255)
        chunks.append("%c%s\0%s\n%s\n" % (stemlen, path[stemlen:],
                                          flags, node))
        prevpath = path
    _checkforbidden(paths)
    return ''.join(chunks)
106
106
class _lazymanifest(dict):
    """This is the pure implementation of lazymanifest.

    It has not been optimized *at all* and is not lazy.

    Maps file path -> (node, flags). All iteration is in sorted path
    order so that it behaves like the C implementation.
    """

    def __init__(self, data):
        dict.__init__(self)
        for f, n, fl in _parse(data):
            self[f] = n, fl

    def __setitem__(self, k, v):
        # v is a (node, flags) pair
        node, flag = v
        assert node is not None
        if len(node) > 21:
            node = node[:21] # match c implementation behavior
        dict.__setitem__(self, k, (node, flag))

    def __iter__(self):
        return iter(sorted(dict.keys(self)))

    def iterkeys(self):
        return iter(sorted(dict.keys(self)))

    def iterentries(self):
        # yields (path, node, flags) in sorted path order
        return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))

    def copy(self):
        c = _lazymanifest('')
        c.update(self)
        return c

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        diff = {}

        for fn, e1 in self.iteritems():
            if fn not in m2:
                # present here, absent in m2
                diff[fn] = e1, (None, '')
            else:
                e2 = m2[fn]
                if e1 != e2:
                    diff[fn] = e1, e2
                elif clean:
                    # unchanged entries are flagged with None when requested
                    diff[fn] = None

        for fn, e2 in m2.iteritems():
            if fn not in self:
                # absent here, present in m2
                diff[fn] = (None, ''), e2

        return diff

    def filtercopy(self, filterfn):
        # new manifest containing only the paths accepted by filterfn
        c = _lazymanifest('')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c

    def text(self):
        """Get the full data of this manifest as a bytestring."""
        return _textv1(self.iterentries())
169
169
# Prefer the C implementation when the parsers extension provides one;
# the pure-Python class above is only a fallback.
try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass
174
174
class manifestdict(object):
    """A flat manifest: maps file path -> node, with per-file flags.

    Backed by a _lazymanifest (pure-Python or C) stored in self._lm.
    """
    def __init__(self, data=''):
        if data.startswith('\0'):
            #_lazymanifest can not parse v2
            self._lm = _lazymanifest('')
            for f, n, fl in _parsev2(data):
                self._lm[f] = n, fl
        else:
            self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        # returns just the node; use find() for (node, flags)
        return self._lm[key][0]

    def find(self, key):
        # returns the (node, flags) pair; raises KeyError if absent
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __setitem__(self, key, node):
        # preserves any flags previously recorded for key
        self._lm[key] = node, self.flags(key, '')

    def __contains__(self, key):
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        files = set(self)
        files.difference_update(m2)
        return files

    @propertycache
    def _dirs(self):
        # lazily-built directory structure over the manifest's paths
        return util.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        return (len(files) < 100 and (match.isexact() or
            (match.prefix() and all(fn in self for fn in files))))

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # whatever is left in fset was never yielded; report plain files
        # (not directories) as bad
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        return self._lm.diff(m2._lm, clean)

    def setflag(self, key, flag):
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key, default=''):
        try:
            return self._lm[key][1]
        except KeyError:
            return default

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def iteritems(self):
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self, usemanifestv2=False):
        if usemanifestv2:
            return _textv2(self._lm.iterentries())
        else:
            # use (probably) native version for v1
            return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as an array.array and a list of changes
        relative to that text, compute a delta that can be used by revlog.

        changes is an iterable of (path, todelete) pairs; returns an
        (arraytext, deltatext) pair where arraytext is the new full text.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < 1000:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _("failed to remove %s from manifest") % f)
                    l = ""
                if dstart is not None and dstart <= start and dend >= start:
                    # this change abuts/overlaps the pending delta hunk:
                    # extend the hunk instead of starting a new one
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    # flush the pending hunk and start a new one
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = array.array('c', self.text())
            deltatext = mdiff.textdiff(base, arraytext)

        return arraytext, deltatext
389
390 def _msearch(m, s, lo=0, hi=None):
390 def _msearch(m, s, lo=0, hi=None):
391 '''return a tuple (start, end) that says where to find s within m.
391 '''return a tuple (start, end) that says where to find s within m.
392
392
393 If the string is found m[start:end] are the line containing
393 If the string is found m[start:end] are the line containing
394 that string. If start == end the string was not found and
394 that string. If start == end the string was not found and
395 they indicate the proper sorted insertion point.
395 they indicate the proper sorted insertion point.
396
396
397 m should be a buffer or a string
397 m should be a buffer or a string
398 s is a string'''
398 s is a string'''
399 def advance(i, c):
399 def advance(i, c):
400 while i < lenm and m[i] != c:
400 while i < lenm and m[i] != c:
401 i += 1
401 i += 1
402 return i
402 return i
403 if not s:
403 if not s:
404 return (lo, lo)
404 return (lo, lo)
405 lenm = len(m)
405 lenm = len(m)
406 if not hi:
406 if not hi:
407 hi = lenm
407 hi = lenm
408 while lo < hi:
408 while lo < hi:
409 mid = (lo + hi) // 2
409 mid = (lo + hi) // 2
410 start = mid
410 start = mid
411 while start > 0 and m[start - 1] != '\n':
411 while start > 0 and m[start - 1] != '\n':
412 start -= 1
412 start -= 1
413 end = advance(start, '\0')
413 end = advance(start, '\0')
414 if m[start:end] < s:
414 if m[start:end] < s:
415 # we know that after the null there are 40 bytes of sha1
415 # we know that after the null there are 40 bytes of sha1
416 # this translates to the bisect lo = mid + 1
416 # this translates to the bisect lo = mid + 1
417 lo = advance(end + 40, '\n') + 1
417 lo = advance(end + 40, '\n') + 1
418 else:
418 else:
419 # this translates to the bisect hi = mid
419 # this translates to the bisect hi = mid
420 hi = start
420 hi = start
421 end = advance(lo, '\0')
421 end = advance(lo, '\0')
422 found = m[lo:end]
422 found = m[lo:end]
423 if s == found:
423 if s == found:
424 # we know that after the null there are 40 bytes of sha1
424 # we know that after the null there are 40 bytes of sha1
425 end = advance(end + 40, '\n')
425 end = advance(end + 40, '\n')
426 return (lo, end + 1)
426 return (lo, end + 1)
427 else:
427 else:
428 return (lo, lo)
428 return (lo, lo)
429
429
430 def _checkforbidden(l):
430 def _checkforbidden(l):
431 """Check filenames for illegal characters."""
431 """Check filenames for illegal characters."""
432 for f in l:
432 for f in l:
433 if '\n' in f or '\r' in f:
433 if '\n' in f or '\r' in f:
434 raise error.RevlogError(
434 raise error.RevlogError(
435 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
435 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
436
436
437
437
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    """Apply delta hunks ``x`` (list of [start, end, content]) to ``addlist``.

    Returns (deltatext, newaddlist): the binary revlog delta and the
    patched text as a new array.  For large addlist arrays, building a
    new array is cheaper than repeatedly modifying the existing one.
    """
    pos = 0
    newaddlist = array.array('c')
    for start, end, content in x:
        # copy the untouched span, then the replacement content
        newaddlist += addlist[pos:start]
        if content:
            newaddlist += array.array('c', content)
        pos = end
    newaddlist += addlist[pos:]

    # binary delta: a (start, end, length) header followed by the new bytes
    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, newaddlist
458
458
459 def _splittopdir(f):
459 def _splittopdir(f):
460 if '/' in f:
460 if '/' in f:
461 dir, subpath = f.split('/', 1)
461 dir, subpath = f.split('/', 1)
462 return dir + '/', subpath
462 return dir + '/', subpath
463 else:
463 else:
464 return '', f
464 return '', f
465
465
466 _noop = lambda s: None
466 _noop = lambda s: None
467
467
class treemanifest(object):
    """A manifest stored as a tree of per-directory manifests.

    Each instance covers one directory: files directly inside it live in
    _files (node ids) and _flags, and each subdirectory is another
    treemanifest in _dirs, keyed by the subdirectory name with a trailing
    '/'.  Loading from a revlog and copying are deferred via the
    _loadfunc/_copyfunc callbacks (the _noop sentinel means "nothing
    pending").
    """
    def __init__(self, dir='', text=''):
        # path of this directory from the repo root; '' for the root,
        # otherwise ends with '/'
        self._dir = dir
        self._node = revlog.nullid
        # lazy callbacks; _load() compares these against _noop with 'is'
        self._loadfunc = _noop
        self._copyfunc = _noop
        # True when in-memory content has diverged from _node's content
        self._dirty = False
        self._dirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:
            def readsubtree(subdir, subm):
                raise AssertionError('treemanifest constructor only accepts '
                                     'flat manifests')
            self.parse(text, readsubtree)
            self._dirty = True # Mark flat manifest dirty after parsing

    def _subpath(self, path):
        # full path of 'path' from the repo root
        return self._dir + path

    def __len__(self):
        """Total number of files in this directory and all subdirectories."""
        self._load()
        size = len(self._files)
        for m in self._dirs.values():
            size += m.__len__()
        return size

    def _isempty(self):
        self._load() # for consistency; already loaded by all callers
        return (not self._files and (not self._dirs or
                all(m._isempty() for m in self._dirs.values())))

    def __repr__(self):
        return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
                (self._dir, revlog.hex(self._node),
                 bool(self._loadfunc is _noop),
                 self._dirty, id(self)))

    def dir(self):
        '''The directory that this tree manifest represents, including a
        trailing '/'. Empty string for the repo root directory.'''
        return self._dir

    def node(self):
        '''This node of this instance. nullid for unsaved instances. Should
        be updated when the instance is read or written from a revlog.
        '''
        assert not self._dirty
        return self._node

    def setnode(self, node):
        # record the revlog node whose content this instance now matches
        self._node = node
        self._dirty = False

    def iterentries(self):
        """Yield (path, node, flags) for every file, in sorted order."""
        self._load()
        for p, n in sorted(self._dirs.items() + self._files.items()):
            if p in self._files:
                yield self._subpath(p), n, self._flags.get(p, '')
            else:
                for x in n.iterentries():
                    yield x

    def iteritems(self):
        """Yield (path, node) for every file, in sorted order."""
        self._load()
        for p, n in sorted(self._dirs.items() + self._files.items()):
            if p in self._files:
                yield self._subpath(p), n
            else:
                for f, sn in n.iteritems():
                    yield f, sn

    def iterkeys(self):
        """Yield every file path, in sorted order."""
        self._load()
        for p in sorted(self._dirs.keys() + self._files.keys()):
            if p in self._files:
                yield self._subpath(p)
            else:
                for f in self._dirs[p].iterkeys():
                    yield f

    def keys(self):
        return list(self.iterkeys())

    def __iter__(self):
        return self.iterkeys()

    def __contains__(self, f):
        if f is None:
            return False
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return False
            return self._dirs[dir].__contains__(subpath)
        else:
            return f in self._files

    def get(self, f, default=None):
        """Return the node for file f, or ``default`` if absent."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return default
            return self._dirs[dir].get(subpath, default)
        else:
            return self._files.get(f, default)

    def __getitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            return self._dirs[dir].__getitem__(subpath)
        else:
            return self._files[f]

    def flags(self, f):
        """Return the flags string for f; '' when unset or f is unknown."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return ''
            return self._dirs[dir].flags(subpath)
        else:
            if f in self._dirs:
                return ''
            return self._flags.get(f, '')

    def find(self, f):
        """Return (node, flags) for file f; raises KeyError if absent."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            return self._dirs[dir].find(subpath)
        else:
            return self._files[f], self._flags.get(f, '')

    def __delitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._dirs[dir].__delitem__(subpath)
            # If the directory is now empty, remove it
            if self._dirs[dir]._isempty():
                del self._dirs[dir]
        else:
            del self._files[f]
            if f in self._flags:
                del self._flags[f]
        self._dirty = True

    def __setitem__(self, f, n):
        assert n is not None
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            # create intermediate subtrees on demand
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].__setitem__(subpath, n)
        else:
            self._files[f] = n[:21] # to match manifestdict's behavior
        self._dirty = True

    def _load(self):
        # run at most one pending lazy callback, resetting it to the
        # _noop sentinel first so it can never run twice (even if the
        # callback itself triggers _load recursively)
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)

    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags
        self._dirty = True

    def copy(self):
        """Return a copy of this manifest; contents are copied lazily when
        this instance itself has not been loaded yet."""
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:
            def _copyfunc(s):
                self._load()
                for d in self._dirs:
                    s._dirs[d] = self._dirs[d].copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)
            if self._loadfunc is _noop:
                # already loaded: populate the copy eagerly
                _copyfunc(copy)
            else:
                # not loaded yet: defer copying until first access
                copy._copyfunc = _copyfunc
        else:
            copy._copyfunc = self._copyfunc
        return copy

    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        files = set()
        def _filesnotin(t1, t2):
            # identical clean subtrees cannot contribute differences
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            for d, m1 in t1._dirs.iteritems():
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    files.update(m1.iterkeys())

            for fn in t1._files.iterkeys():
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files

    @propertycache
    def _alldirs(self):
        return util.dirs(self)

    def dirs(self):
        return self._alldirs

    def hasdir(self, dir):
        """True if ``dir`` is a (possibly nested) directory in this tree."""
        self._load()
        topdir, subdir = _splittopdir(dir)
        if topdir:
            if topdir in self._dirs:
                return self._dirs[topdir].hasdir(subdir)
            return False
        return (dir + '/') in self._dirs

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # anything left in fset was requested but never yielded
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        if not match.visitdir(self._dir[:-1] or '.'):
            return

        # yield this dir's files and walk its submanifests
        self._load()
        for p in sorted(self._dirs.keys() + self._files.keys()):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                for f in self._dirs[p]._walk(match):
                    yield f

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        return self._matches(match)

    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        visit = match.visitdir(self._dir[:-1] or '.')
        if visit == 'all':
            # every file below this directory matches; no need to recurse
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            fullp = self._subpath(fn)
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        for dir, subm in self._dirs.iteritems():
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            # unchanged identical subtrees can be skipped wholesale
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    _diff(emptytree, m2)

            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result

    def unmodifiedsince(self, m2):
        # true only when both trees are clean and refer to the same node
        return not self._dirty and not m2._dirty and self._node == m2._node

    def parse(self, text, readsubtree):
        """Populate this node from manifest ``text``; subdirectory entries
        (flag 't') are loaded through ``readsubtree(path, node)``."""
        for f, n, fl in _parse(text):
            if fl == 't':
                f = f + '/'
                self._dirs[f] = readsubtree(self._subpath(f), n)
            elif '/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl

    def text(self, usemanifestv2=False):
        """Get the full data of this manifest as a bytestring."""
        self._load()
        return _text(self.iterentries(), usemanifestv2)

    def dirtext(self, usemanifestv2=False):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files), usemanifestv2)

    def read(self, gettext, readsubtree):
        """Arrange for lazy loading: on first access, parse gettext() using
        readsubtree to load child directories."""
        def _load_for_read(s):
            s.parse(gettext(), readsubtree)
            s._dirty = False
        self._loadfunc = _load_for_read

    def writesubtrees(self, m1, m2, writesubtree):
        """Write each child subtree via writesubtree(subm, p1node, p2node).

        m1/m2 supply each subtree's revlog parents; if the first parent is
        null the parents are swapped so p1 is non-null whenever possible.
        """
        self._load() # for consistency; should never have any effect here
        emptytree = treemanifest()
        for d, subm in self._dirs.iteritems():
            subp1 = m1._dirs.get(d, emptytree)._node
            subp2 = m2._dirs.get(d, emptytree)._node
            if subp1 == revlog.nullid:
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2)
890
890
891 class manifest(revlog.revlog):
891 class manifest(revlog.revlog):
    def __init__(self, opener, dir='', dirlogcache=None):
        '''The 'dir' and 'dirlogcache' arguments are for internal use by
        manifest.manifest only. External users should create a root manifest
        log with manifest.manifest(opener) and call dirlog() on it.
        '''
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        usetreemanifest = False
        usemanifestv2 = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            usemanifestv2 = opts.get('manifestv2', usemanifestv2)
        self._mancache = util.lrucachedict(cachesize)
        # with the treemanifest option both the in-memory and on-disk
        # representations are trees; the two flags are set together here
        self._treeinmem = usetreemanifest
        self._treeondisk = usetreemanifest
        self._usemanifestv2 = usemanifestv2
        indexfile = "00manifest.i"
        if dir:
            # per-directory manifest revlogs only exist for tree manifests
            assert self._treeondisk
            if not dir.endswith('/'):
                dir = dir + '/'
            indexfile = "meta/" + dir + "00manifest.i"
        revlog.revlog.__init__(self, opener, indexfile)
        self._dir = dir
        # The dirlogcache is kept on the root manifest log
        if dir:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}
925
925
926 def _newmanifest(self, data=''):
926 def _newmanifest(self, data=''):
927 if self._treeinmem:
927 if self._treeinmem:
928 return treemanifest(self._dir, data)
928 return treemanifest(self._dir, data)
929 return manifestdict(data)
929 return manifestdict(data)
930
930
931 def dirlog(self, dir):
931 def dirlog(self, dir):
932 if dir:
932 if dir:
933 assert self._treeondisk
933 assert self._treeondisk
934 if dir not in self._dirlogcache:
934 if dir not in self._dirlogcache:
935 self._dirlogcache[dir] = manifest(self.opener, dir,
935 self._dirlogcache[dir] = manifest(self.opener, dir,
936 self._dirlogcache)
936 self._dirlogcache)
937 return self._dirlogcache[dir]
937 return self._dirlogcache[dir]
938
938
939 def _slowreaddelta(self, node):
939 def _slowreaddelta(self, node):
940 r0 = self.deltaparent(self.rev(node))
940 r0 = self.deltaparent(self.rev(node))
941 m0 = self.read(self.node(r0))
941 m0 = self.read(self.node(r0))
942 m1 = self.read(node)
942 m1 = self.read(node)
943 md = self._newmanifest()
943 md = self._newmanifest()
944 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
944 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
945 if n1:
945 if n1:
946 md[f] = n1
946 md[f] = n1
947 if fl1:
947 if fl1:
948 md.setflag(f, fl1)
948 md.setflag(f, fl1)
949 return md
949 return md
950
950
951 def readdelta(self, node):
951 def readdelta(self, node):
952 if self._usemanifestv2 or self._treeondisk:
952 if self._usemanifestv2 or self._treeondisk:
953 return self._slowreaddelta(node)
953 return self._slowreaddelta(node)
954 r = self.rev(node)
954 r = self.rev(node)
955 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
955 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
956 return self._newmanifest(d)
956 return self._newmanifest(d)
957
957
958 def readshallowdelta(self, node):
958 def readshallowdelta(self, node):
959 '''For flat manifests, this is the same as readdelta(). For
959 '''For flat manifests, this is the same as readdelta(). For
960 treemanifests, this will read the delta for this revlog's directory,
960 treemanifests, this will read the delta for this revlog's directory,
961 without recursively reading subdirectory manifests. Instead, any
961 without recursively reading subdirectory manifests. Instead, any
962 subdirectory entry will be reported as it appears in the manifests, i.e.
962 subdirectory entry will be reported as it appears in the manifests, i.e.
963 the subdirectory will be reported among files and distinguished only by
963 the subdirectory will be reported among files and distinguished only by
964 its 't' flag.'''
964 its 't' flag.'''
965 if not self._treeondisk:
965 if not self._treeondisk:
966 return self.readdelta(node)
966 return self.readdelta(node)
967 if self._usemanifestv2:
967 if self._usemanifestv2:
968 raise error.Abort(
968 raise error.Abort(
969 "readshallowdelta() not implemented for manifestv2")
969 "readshallowdelta() not implemented for manifestv2")
970 r = self.rev(node)
970 r = self.rev(node)
971 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
971 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
972 return manifestdict(d)
972 return manifestdict(d)
973
973
974 def readfast(self, node):
974 def readfast(self, node):
975 '''use the faster of readdelta or read
975 '''use the faster of readdelta or read
976
976
977 This will return a manifest which is either only the files
977 This will return a manifest which is either only the files
978 added/modified relative to p1, or all files in the
978 added/modified relative to p1, or all files in the
979 manifest. Which one is returned depends on the codepath used
979 manifest. Which one is returned depends on the codepath used
980 to retrieve the data.
980 to retrieve the data.
981 '''
981 '''
982 r = self.rev(node)
982 r = self.rev(node)
983 deltaparent = self.deltaparent(r)
983 deltaparent = self.deltaparent(r)
984 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
984 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
985 return self.readdelta(node)
985 return self.readdelta(node)
986 return self.read(node)
986 return self.read(node)
987
987
988 def readshallowfast(self, node):
989 '''like readfast(), but calls readshallowdelta() instead of readdelta()
990 '''
991 r = self.rev(node)
992 deltaparent = self.deltaparent(r)
993 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
994 return self.readshallowdelta(node)
995 return self.readshallow(node)
996
988 def read(self, node):
997 def read(self, node):
989 if node == revlog.nullid:
998 if node == revlog.nullid:
990 return self._newmanifest() # don't upset local cache
999 return self._newmanifest() # don't upset local cache
991 if node in self._mancache:
1000 if node in self._mancache:
992 return self._mancache[node][0]
1001 return self._mancache[node][0]
993 if self._treeondisk:
1002 if self._treeondisk:
994 def gettext():
1003 def gettext():
995 return self.revision(node)
1004 return self.revision(node)
996 def readsubtree(dir, subm):
1005 def readsubtree(dir, subm):
997 return self.dirlog(dir).read(subm)
1006 return self.dirlog(dir).read(subm)
998 m = self._newmanifest()
1007 m = self._newmanifest()
999 m.read(gettext, readsubtree)
1008 m.read(gettext, readsubtree)
1000 m.setnode(node)
1009 m.setnode(node)
1001 arraytext = None
1010 arraytext = None
1002 else:
1011 else:
1003 text = self.revision(node)
1012 text = self.revision(node)
1004 m = self._newmanifest(text)
1013 m = self._newmanifest(text)
1005 arraytext = array.array('c', text)
1014 arraytext = array.array('c', text)
1006 self._mancache[node] = (m, arraytext)
1015 self._mancache[node] = (m, arraytext)
1007 return m
1016 return m
1008
1017
1018 def readshallow(self, node):
1019 '''Reads the manifest in this directory. When using flat manifests,
1020 this manifest will generally have files in subdirectories in it. Does
1021 not cache the manifest as the callers generally do not read the same
1022 version twice.'''
1023 return manifestdict(self.revision(node))
1024
1009 def find(self, node, f):
1025 def find(self, node, f):
1010 '''look up entry for a single file efficiently.
1026 '''look up entry for a single file efficiently.
1011 return (node, flags) pair if found, (None, None) if not.'''
1027 return (node, flags) pair if found, (None, None) if not.'''
1012 m = self.read(node)
1028 m = self.read(node)
1013 try:
1029 try:
1014 return m.find(f)
1030 return m.find(f)
1015 except KeyError:
1031 except KeyError:
1016 return None, None
1032 return None, None
1017
1033
1018 def add(self, m, transaction, link, p1, p2, added, removed):
1034 def add(self, m, transaction, link, p1, p2, added, removed):
1019 if (p1 in self._mancache and not self._treeinmem
1035 if (p1 in self._mancache and not self._treeinmem
1020 and not self._usemanifestv2):
1036 and not self._usemanifestv2):
1021 # If our first parent is in the manifest cache, we can
1037 # If our first parent is in the manifest cache, we can
1022 # compute a delta here using properties we know about the
1038 # compute a delta here using properties we know about the
1023 # manifest up-front, which may save time later for the
1039 # manifest up-front, which may save time later for the
1024 # revlog layer.
1040 # revlog layer.
1025
1041
1026 _checkforbidden(added)
1042 _checkforbidden(added)
1027 # combine the changed lists into one sorted iterator
1043 # combine the changed lists into one sorted iterator
1028 work = heapq.merge([(x, False) for x in added],
1044 work = heapq.merge([(x, False) for x in added],
1029 [(x, True) for x in removed])
1045 [(x, True) for x in removed])
1030
1046
1031 arraytext, deltatext = m.fastdelta(self._mancache[p1][1], work)
1047 arraytext, deltatext = m.fastdelta(self._mancache[p1][1], work)
1032 cachedelta = self.rev(p1), deltatext
1048 cachedelta = self.rev(p1), deltatext
1033 text = util.buffer(arraytext)
1049 text = util.buffer(arraytext)
1034 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
1050 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
1035 else:
1051 else:
1036 # The first parent manifest isn't already loaded, so we'll
1052 # The first parent manifest isn't already loaded, so we'll
1037 # just encode a fulltext of the manifest and pass that
1053 # just encode a fulltext of the manifest and pass that
1038 # through to the revlog layer, and let it handle the delta
1054 # through to the revlog layer, and let it handle the delta
1039 # process.
1055 # process.
1040 if self._treeondisk:
1056 if self._treeondisk:
1041 m1 = self.read(p1)
1057 m1 = self.read(p1)
1042 m2 = self.read(p2)
1058 m2 = self.read(p2)
1043 n = self._addtree(m, transaction, link, m1, m2)
1059 n = self._addtree(m, transaction, link, m1, m2)
1044 arraytext = None
1060 arraytext = None
1045 else:
1061 else:
1046 text = m.text(self._usemanifestv2)
1062 text = m.text(self._usemanifestv2)
1047 n = self.addrevision(text, transaction, link, p1, p2)
1063 n = self.addrevision(text, transaction, link, p1, p2)
1048 arraytext = array.array('c', text)
1064 arraytext = array.array('c', text)
1049
1065
1050 self._mancache[n] = (m, arraytext)
1066 self._mancache[n] = (m, arraytext)
1051
1067
1052 return n
1068 return n
1053
1069
1054 def _addtree(self, m, transaction, link, m1, m2):
1070 def _addtree(self, m, transaction, link, m1, m2):
1055 # If the manifest is unchanged compared to one parent,
1071 # If the manifest is unchanged compared to one parent,
1056 # don't write a new revision
1072 # don't write a new revision
1057 if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
1073 if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
1058 return m.node()
1074 return m.node()
1059 def writesubtree(subm, subp1, subp2):
1075 def writesubtree(subm, subp1, subp2):
1060 sublog = self.dirlog(subm.dir())
1076 sublog = self.dirlog(subm.dir())
1061 sublog.add(subm, transaction, link, subp1, subp2, None, None)
1077 sublog.add(subm, transaction, link, subp1, subp2, None, None)
1062 m.writesubtrees(m1, m2, writesubtree)
1078 m.writesubtrees(m1, m2, writesubtree)
1063 text = m.dirtext(self._usemanifestv2)
1079 text = m.dirtext(self._usemanifestv2)
1064 # Double-check whether contents are unchanged to one parent
1080 # Double-check whether contents are unchanged to one parent
1065 if text == m1.dirtext(self._usemanifestv2):
1081 if text == m1.dirtext(self._usemanifestv2):
1066 n = m1.node()
1082 n = m1.node()
1067 elif text == m2.dirtext(self._usemanifestv2):
1083 elif text == m2.dirtext(self._usemanifestv2):
1068 n = m2.node()
1084 n = m2.node()
1069 else:
1085 else:
1070 n = self.addrevision(text, transaction, link, m1.node(), m2.node())
1086 n = self.addrevision(text, transaction, link, m1.node(), m2.node())
1071 # Save nodeid so parent manifest can calculate its nodeid
1087 # Save nodeid so parent manifest can calculate its nodeid
1072 m.setnode(n)
1088 m.setnode(n)
1073 return n
1089 return n
1074
1090
1075 def clearcaches(self):
1091 def clearcaches(self):
1076 super(manifest, self).clearcaches()
1092 super(manifest, self).clearcaches()
1077 self._mancache.clear()
1093 self._mancache.clear()
1078 self._dirlogcache = {'': self}
1094 self._dirlogcache = {'': self}
@@ -1,731 +1,738 b''
1 #require killdaemons
1 #require killdaemons
2
2
3 $ cat << EOF >> $HGRCPATH
3 $ cat << EOF >> $HGRCPATH
4 > [format]
4 > [format]
5 > usegeneraldelta=yes
5 > usegeneraldelta=yes
6 > [ui]
6 > [ui]
7 > ssh=python "$TESTDIR/dummyssh"
7 > ssh=python "$TESTDIR/dummyssh"
8 > EOF
8 > EOF
9
9
10 Set up repo
10 Set up repo
11
11
12 $ hg --config experimental.treemanifest=True init repo
12 $ hg --config experimental.treemanifest=True init repo
13 $ cd repo
13 $ cd repo
14
14
15 Requirements get set on init
15 Requirements get set on init
16
16
17 $ grep treemanifest .hg/requires
17 $ grep treemanifest .hg/requires
18 treemanifest
18 treemanifest
19
19
20 Without directories, looks like any other repo
20 Without directories, looks like any other repo
21
21
22 $ echo 0 > a
22 $ echo 0 > a
23 $ echo 0 > b
23 $ echo 0 > b
24 $ hg ci -Aqm initial
24 $ hg ci -Aqm initial
25 $ hg debugdata -m 0
25 $ hg debugdata -m 0
26 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
26 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
27 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
27 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
28
28
29 Submanifest is stored in separate revlog
29 Submanifest is stored in separate revlog
30
30
31 $ mkdir dir1
31 $ mkdir dir1
32 $ echo 1 > dir1/a
32 $ echo 1 > dir1/a
33 $ echo 1 > dir1/b
33 $ echo 1 > dir1/b
34 $ echo 1 > e
34 $ echo 1 > e
35 $ hg ci -Aqm 'add dir1'
35 $ hg ci -Aqm 'add dir1'
36 $ hg debugdata -m 1
36 $ hg debugdata -m 1
37 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
37 a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
38 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
38 b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
39 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
39 dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
40 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
40 e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
41 $ hg debugdata --dir dir1 0
41 $ hg debugdata --dir dir1 0
42 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
42 a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
43 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
43 b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
44
44
45 Can add nested directories
45 Can add nested directories
46
46
47 $ mkdir dir1/dir1
47 $ mkdir dir1/dir1
48 $ echo 2 > dir1/dir1/a
48 $ echo 2 > dir1/dir1/a
49 $ echo 2 > dir1/dir1/b
49 $ echo 2 > dir1/dir1/b
50 $ mkdir dir1/dir2
50 $ mkdir dir1/dir2
51 $ echo 2 > dir1/dir2/a
51 $ echo 2 > dir1/dir2/a
52 $ echo 2 > dir1/dir2/b
52 $ echo 2 > dir1/dir2/b
53 $ hg ci -Aqm 'add dir1/dir1'
53 $ hg ci -Aqm 'add dir1/dir1'
54 $ hg files -r .
54 $ hg files -r .
55 a
55 a
56 b
56 b
57 dir1/a (glob)
57 dir1/a (glob)
58 dir1/b (glob)
58 dir1/b (glob)
59 dir1/dir1/a (glob)
59 dir1/dir1/a (glob)
60 dir1/dir1/b (glob)
60 dir1/dir1/b (glob)
61 dir1/dir2/a (glob)
61 dir1/dir2/a (glob)
62 dir1/dir2/b (glob)
62 dir1/dir2/b (glob)
63 e
63 e
64
64
65 Revision is not created for unchanged directory
65 Revision is not created for unchanged directory
66
66
67 $ mkdir dir2
67 $ mkdir dir2
68 $ echo 3 > dir2/a
68 $ echo 3 > dir2/a
69 $ hg add dir2
69 $ hg add dir2
70 adding dir2/a (glob)
70 adding dir2/a (glob)
71 $ hg debugindex --dir dir1 > before
71 $ hg debugindex --dir dir1 > before
72 $ hg ci -qm 'add dir2'
72 $ hg ci -qm 'add dir2'
73 $ hg debugindex --dir dir1 > after
73 $ hg debugindex --dir dir1 > after
74 $ diff before after
74 $ diff before after
75 $ rm before after
75 $ rm before after
76
76
77 Removing directory does not create an revlog entry
77 Removing directory does not create an revlog entry
78
78
79 $ hg rm dir1/dir1
79 $ hg rm dir1/dir1
80 removing dir1/dir1/a (glob)
80 removing dir1/dir1/a (glob)
81 removing dir1/dir1/b (glob)
81 removing dir1/dir1/b (glob)
82 $ hg debugindex --dir dir1/dir1 > before
82 $ hg debugindex --dir dir1/dir1 > before
83 $ hg ci -qm 'remove dir1/dir1'
83 $ hg ci -qm 'remove dir1/dir1'
84 $ hg debugindex --dir dir1/dir1 > after
84 $ hg debugindex --dir dir1/dir1 > after
85 $ diff before after
85 $ diff before after
86 $ rm before after
86 $ rm before after
87
87
88 Check that hg files (calls treemanifest.walk()) works
88 Check that hg files (calls treemanifest.walk()) works
89 without loading all directory revlogs
89 without loading all directory revlogs
90
90
91 $ hg co 'desc("add dir2")'
91 $ hg co 'desc("add dir2")'
92 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
92 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
93 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
93 $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
94 $ hg files -r . dir1
94 $ hg files -r . dir1
95 dir1/a (glob)
95 dir1/a (glob)
96 dir1/b (glob)
96 dir1/b (glob)
97 dir1/dir1/a (glob)
97 dir1/dir1/a (glob)
98 dir1/dir1/b (glob)
98 dir1/dir1/b (glob)
99 dir1/dir2/a (glob)
99 dir1/dir2/a (glob)
100 dir1/dir2/b (glob)
100 dir1/dir2/b (glob)
101
101
102 Check that status between revisions works (calls treemanifest.matches())
102 Check that status between revisions works (calls treemanifest.matches())
103 without loading all directory revlogs
103 without loading all directory revlogs
104
104
105 $ hg status --rev 'desc("add dir1")' --rev . dir1
105 $ hg status --rev 'desc("add dir1")' --rev . dir1
106 A dir1/dir1/a
106 A dir1/dir1/a
107 A dir1/dir1/b
107 A dir1/dir1/b
108 A dir1/dir2/a
108 A dir1/dir2/a
109 A dir1/dir2/b
109 A dir1/dir2/b
110 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
110 $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2
111
111
112 Merge creates 2-parent revision of directory revlog
112 Merge creates 2-parent revision of directory revlog
113
113
114 $ echo 5 > dir1/a
114 $ echo 5 > dir1/a
115 $ hg ci -Aqm 'modify dir1/a'
115 $ hg ci -Aqm 'modify dir1/a'
116 $ hg co '.^'
116 $ hg co '.^'
117 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
117 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
118 $ echo 6 > dir1/b
118 $ echo 6 > dir1/b
119 $ hg ci -Aqm 'modify dir1/b'
119 $ hg ci -Aqm 'modify dir1/b'
120 $ hg merge 'desc("modify dir1/a")'
120 $ hg merge 'desc("modify dir1/a")'
121 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
121 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
122 (branch merge, don't forget to commit)
122 (branch merge, don't forget to commit)
123 $ hg ci -m 'conflict-free merge involving dir1/'
123 $ hg ci -m 'conflict-free merge involving dir1/'
124 $ cat dir1/a
124 $ cat dir1/a
125 5
125 5
126 $ cat dir1/b
126 $ cat dir1/b
127 6
127 6
128 $ hg debugindex --dir dir1
128 $ hg debugindex --dir dir1
129 rev offset length delta linkrev nodeid p1 p2
129 rev offset length delta linkrev nodeid p1 p2
130 0 0 54 -1 1 8b3ffd73f901 000000000000 000000000000
130 0 0 54 -1 1 8b3ffd73f901 000000000000 000000000000
131 1 54 68 0 2 68e9d057c5a8 8b3ffd73f901 000000000000
131 1 54 68 0 2 68e9d057c5a8 8b3ffd73f901 000000000000
132 2 122 12 1 4 4698198d2624 68e9d057c5a8 000000000000
132 2 122 12 1 4 4698198d2624 68e9d057c5a8 000000000000
133 3 134 55 1 5 44844058ccce 68e9d057c5a8 000000000000
133 3 134 55 1 5 44844058ccce 68e9d057c5a8 000000000000
134 4 189 55 1 6 bf3d9b744927 68e9d057c5a8 000000000000
134 4 189 55 1 6 bf3d9b744927 68e9d057c5a8 000000000000
135 5 244 55 4 7 dde7c0af2a03 bf3d9b744927 44844058ccce
135 5 244 55 4 7 dde7c0af2a03 bf3d9b744927 44844058ccce
136
136
137 Merge keeping directory from parent 1 does not create revlog entry. (Note that
137 Merge keeping directory from parent 1 does not create revlog entry. (Note that
138 dir1's manifest does change, but only because dir1/a's filelog changes.)
138 dir1's manifest does change, but only because dir1/a's filelog changes.)
139
139
140 $ hg co 'desc("add dir2")'
140 $ hg co 'desc("add dir2")'
141 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
141 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
142 $ echo 8 > dir2/a
142 $ echo 8 > dir2/a
143 $ hg ci -m 'modify dir2/a'
143 $ hg ci -m 'modify dir2/a'
144 created new head
144 created new head
145
145
146 $ hg debugindex --dir dir2 > before
146 $ hg debugindex --dir dir2 > before
147 $ hg merge 'desc("modify dir1/a")'
147 $ hg merge 'desc("modify dir1/a")'
148 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
148 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
149 (branch merge, don't forget to commit)
149 (branch merge, don't forget to commit)
150 $ hg revert -r 'desc("modify dir2/a")' .
150 $ hg revert -r 'desc("modify dir2/a")' .
151 reverting dir1/a (glob)
151 reverting dir1/a (glob)
152 $ hg ci -m 'merge, keeping parent 1'
152 $ hg ci -m 'merge, keeping parent 1'
153 $ hg debugindex --dir dir2 > after
153 $ hg debugindex --dir dir2 > after
154 $ diff before after
154 $ diff before after
155 $ rm before after
155 $ rm before after
156
156
157 Merge keeping directory from parent 2 does not create revlog entry. (Note that
157 Merge keeping directory from parent 2 does not create revlog entry. (Note that
158 dir2's manifest does change, but only because dir2/a's filelog changes.)
158 dir2's manifest does change, but only because dir2/a's filelog changes.)
159
159
160 $ hg co 'desc("modify dir2/a")'
160 $ hg co 'desc("modify dir2/a")'
161 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
161 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
162 $ hg debugindex --dir dir1 > before
162 $ hg debugindex --dir dir1 > before
163 $ hg merge 'desc("modify dir1/a")'
163 $ hg merge 'desc("modify dir1/a")'
164 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
164 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
165 (branch merge, don't forget to commit)
165 (branch merge, don't forget to commit)
166 $ hg revert -r 'desc("modify dir1/a")' .
166 $ hg revert -r 'desc("modify dir1/a")' .
167 reverting dir2/a (glob)
167 reverting dir2/a (glob)
168 $ hg ci -m 'merge, keeping parent 2'
168 $ hg ci -m 'merge, keeping parent 2'
169 created new head
169 created new head
170 $ hg debugindex --dir dir1 > after
170 $ hg debugindex --dir dir1 > after
171 $ diff before after
171 $ diff before after
172 $ rm before after
172 $ rm before after
173
173
174 Create flat source repo for tests with mixed flat/tree manifests
174 Create flat source repo for tests with mixed flat/tree manifests
175
175
176 $ cd ..
176 $ cd ..
177 $ hg init repo-flat
177 $ hg init repo-flat
178 $ cd repo-flat
178 $ cd repo-flat
179
179
180 Create a few commits with flat manifest
180 Create a few commits with flat manifest
181
181
182 $ echo 0 > a
182 $ echo 0 > a
183 $ echo 0 > b
183 $ echo 0 > b
184 $ echo 0 > e
184 $ echo 0 > e
185 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
185 $ for d in dir1 dir1/dir1 dir1/dir2 dir2
186 > do
186 > do
187 > mkdir $d
187 > mkdir $d
188 > echo 0 > $d/a
188 > echo 0 > $d/a
189 > echo 0 > $d/b
189 > echo 0 > $d/b
190 > done
190 > done
191 $ hg ci -Aqm initial
191 $ hg ci -Aqm initial
192
192
193 $ echo 1 > a
193 $ echo 1 > a
194 $ echo 1 > dir1/a
194 $ echo 1 > dir1/a
195 $ echo 1 > dir1/dir1/a
195 $ echo 1 > dir1/dir1/a
196 $ hg ci -Aqm 'modify on branch 1'
196 $ hg ci -Aqm 'modify on branch 1'
197
197
198 $ hg co 0
198 $ hg co 0
199 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
199 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
200 $ echo 2 > b
200 $ echo 2 > b
201 $ echo 2 > dir1/b
201 $ echo 2 > dir1/b
202 $ echo 2 > dir1/dir1/b
202 $ echo 2 > dir1/dir1/b
203 $ hg ci -Aqm 'modify on branch 2'
203 $ hg ci -Aqm 'modify on branch 2'
204
204
205 $ hg merge 1
205 $ hg merge 1
206 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
206 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
207 (branch merge, don't forget to commit)
207 (branch merge, don't forget to commit)
208 $ hg ci -m 'merge of flat manifests to new flat manifest'
208 $ hg ci -m 'merge of flat manifests to new flat manifest'
209
209
210 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
210 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
211 $ cat hg.pid >> $DAEMON_PIDS
211 $ cat hg.pid >> $DAEMON_PIDS
212
212
213 Create clone with tree manifests enabled
213 Create clone with tree manifests enabled
214
214
215 $ cd ..
215 $ cd ..
216 $ hg clone --config experimental.treemanifest=1 \
216 $ hg clone --config experimental.treemanifest=1 \
217 > http://localhost:$HGPORT repo-mixed -r 1
217 > http://localhost:$HGPORT repo-mixed -r 1
218 adding changesets
218 adding changesets
219 adding manifests
219 adding manifests
220 adding file changes
220 adding file changes
221 added 2 changesets with 14 changes to 11 files
221 added 2 changesets with 14 changes to 11 files
222 updating to branch default
222 updating to branch default
223 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
223 11 files updated, 0 files merged, 0 files removed, 0 files unresolved
224 $ cd repo-mixed
224 $ cd repo-mixed
225 $ test -d .hg/store/meta
225 $ test -d .hg/store/meta
226 [1]
226 [1]
227 $ grep treemanifest .hg/requires
227 $ grep treemanifest .hg/requires
228 treemanifest
228 treemanifest
229
229
230 Should be possible to push updates from flat to tree manifest repo
230 Should be possible to push updates from flat to tree manifest repo
231
231
232 $ hg -R ../repo-flat push ssh://user@dummy/repo-mixed
232 $ hg -R ../repo-flat push ssh://user@dummy/repo-mixed
233 pushing to ssh://user@dummy/repo-mixed
233 pushing to ssh://user@dummy/repo-mixed
234 searching for changes
234 searching for changes
235 remote: adding changesets
235 remote: adding changesets
236 remote: adding manifests
236 remote: adding manifests
237 remote: adding file changes
237 remote: adding file changes
238 remote: added 2 changesets with 3 changes to 3 files
238 remote: added 2 changesets with 3 changes to 3 files
239
239
240 Commit should store revlog per directory
240 Commit should store revlog per directory
241
241
242 $ hg co 1
242 $ hg co 1
243 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
243 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
244 $ echo 3 > a
244 $ echo 3 > a
245 $ echo 3 > dir1/a
245 $ echo 3 > dir1/a
246 $ echo 3 > dir1/dir1/a
246 $ echo 3 > dir1/dir1/a
247 $ hg ci -m 'first tree'
247 $ hg ci -m 'first tree'
248 created new head
248 created new head
249 $ find .hg/store/meta | sort
249 $ find .hg/store/meta | sort
250 .hg/store/meta
250 .hg/store/meta
251 .hg/store/meta/dir1
251 .hg/store/meta/dir1
252 .hg/store/meta/dir1/00manifest.i
252 .hg/store/meta/dir1/00manifest.i
253 .hg/store/meta/dir1/dir1
253 .hg/store/meta/dir1/dir1
254 .hg/store/meta/dir1/dir1/00manifest.i
254 .hg/store/meta/dir1/dir1/00manifest.i
255 .hg/store/meta/dir1/dir2
255 .hg/store/meta/dir1/dir2
256 .hg/store/meta/dir1/dir2/00manifest.i
256 .hg/store/meta/dir1/dir2/00manifest.i
257 .hg/store/meta/dir2
257 .hg/store/meta/dir2
258 .hg/store/meta/dir2/00manifest.i
258 .hg/store/meta/dir2/00manifest.i
259
259
260 Merge of two trees
260 Merge of two trees
261
261
262 $ hg co 2
262 $ hg co 2
263 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
263 6 files updated, 0 files merged, 0 files removed, 0 files unresolved
264 $ hg merge 1
264 $ hg merge 1
265 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
265 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
266 (branch merge, don't forget to commit)
266 (branch merge, don't forget to commit)
267 $ hg ci -m 'merge of flat manifests to new tree manifest'
267 $ hg ci -m 'merge of flat manifests to new tree manifest'
268 created new head
268 created new head
269 $ hg diff -r 3
269 $ hg diff -r 3
270
270
271 Parent of tree root manifest should be flat manifest, and two for merge
271 Parent of tree root manifest should be flat manifest, and two for merge
272
272
273 $ hg debugindex -m
273 $ hg debugindex -m
274 rev offset length delta linkrev nodeid p1 p2
274 rev offset length delta linkrev nodeid p1 p2
275 0 0 80 -1 0 40536115ed9e 000000000000 000000000000
275 0 0 80 -1 0 40536115ed9e 000000000000 000000000000
276 1 80 83 0 1 f3376063c255 40536115ed9e 000000000000
276 1 80 83 0 1 f3376063c255 40536115ed9e 000000000000
277 2 163 89 0 2 5d9b9da231a2 40536115ed9e 000000000000
277 2 163 89 0 2 5d9b9da231a2 40536115ed9e 000000000000
278 3 252 83 2 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
278 3 252 83 2 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
279 4 335 124 1 4 51e32a8c60ee f3376063c255 000000000000
279 4 335 124 1 4 51e32a8c60ee f3376063c255 000000000000
280 5 459 126 2 5 cc5baa78b230 5d9b9da231a2 f3376063c255
280 5 459 126 2 5 cc5baa78b230 5d9b9da231a2 f3376063c255
281
281
282
282
283 Status across flat/tree boundary should work
283 Status across flat/tree boundary should work
284
284
285 $ hg status --rev '.^' --rev .
285 $ hg status --rev '.^' --rev .
286 M a
286 M a
287 M dir1/a
287 M dir1/a
288 M dir1/dir1/a
288 M dir1/dir1/a
289
289
290
290
291 Turning off treemanifest config has no effect
291 Turning off treemanifest config has no effect
292
292
293 $ hg debugindex --dir dir1
293 $ hg debugindex --dir dir1
294 rev offset length delta linkrev nodeid p1 p2
294 rev offset length delta linkrev nodeid p1 p2
295 0 0 127 -1 4 064927a0648a 000000000000 000000000000
295 0 0 127 -1 4 064927a0648a 000000000000 000000000000
296 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
296 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
297 $ echo 2 > dir1/a
297 $ echo 2 > dir1/a
298 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
298 $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
299 $ hg debugindex --dir dir1
299 $ hg debugindex --dir dir1
300 rev offset length delta linkrev nodeid p1 p2
300 rev offset length delta linkrev nodeid p1 p2
301 0 0 127 -1 4 064927a0648a 000000000000 000000000000
301 0 0 127 -1 4 064927a0648a 000000000000 000000000000
302 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
302 1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
303 2 238 55 1 6 5b16163a30c6 25ecb8cb8618 000000000000
303 2 238 55 1 6 5b16163a30c6 25ecb8cb8618 000000000000
304
304
305 Stripping and recovering changes should work
305 Stripping and recovering changes should work
306
306
307 $ hg st --change tip
307 $ hg st --change tip
308 M dir1/a
308 M dir1/a
309 $ hg --config extensions.strip= strip tip
309 $ hg --config extensions.strip= strip tip
310 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
310 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
311 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg (glob)
311 saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg (glob)
312 $ hg unbundle -q .hg/strip-backup/*
312 $ hg unbundle -q .hg/strip-backup/*
313 $ hg st --change tip
313 $ hg st --change tip
314 M dir1/a
314 M dir1/a
315
315
316 Shelving and unshelving should work
316 Shelving and unshelving should work
317
317
318 $ echo foo >> dir1/a
318 $ echo foo >> dir1/a
319 $ hg --config extensions.shelve= shelve
319 $ hg --config extensions.shelve= shelve
320 shelved as default
320 shelved as default
321 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
321 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
322 $ hg --config extensions.shelve= unshelve
322 $ hg --config extensions.shelve= unshelve
323 unshelving change 'default'
323 unshelving change 'default'
324 $ hg diff --nodates
324 $ hg diff --nodates
325 diff -r 708a273da119 dir1/a
325 diff -r 708a273da119 dir1/a
326 --- a/dir1/a
326 --- a/dir1/a
327 +++ b/dir1/a
327 +++ b/dir1/a
328 @@ -1,1 +1,2 @@
328 @@ -1,1 +1,2 @@
329 1
329 1
330 +foo
330 +foo
331
331
332 Pushing from treemanifest repo to an empty repo makes that a treemanifest repo
332 Pushing from treemanifest repo to an empty repo makes that a treemanifest repo
333
333
334 $ cd ..
334 $ cd ..
335 $ hg init empty-repo
335 $ hg init empty-repo
336 $ cat << EOF >> empty-repo/.hg/hgrc
336 $ cat << EOF >> empty-repo/.hg/hgrc
337 > [experimental]
337 > [experimental]
338 > changegroup3=yes
338 > changegroup3=yes
339 > EOF
339 > EOF
340 $ grep treemanifest empty-repo/.hg/requires
340 $ grep treemanifest empty-repo/.hg/requires
341 [1]
341 [1]
342 $ hg push -R repo -r 0 empty-repo
342 $ hg push -R repo -r 0 empty-repo
343 pushing to empty-repo
343 pushing to empty-repo
344 searching for changes
344 searching for changes
345 adding changesets
345 adding changesets
346 adding manifests
346 adding manifests
347 adding file changes
347 adding file changes
348 added 1 changesets with 2 changes to 2 files
348 added 1 changesets with 2 changes to 2 files
349 $ grep treemanifest empty-repo/.hg/requires
349 $ grep treemanifest empty-repo/.hg/requires
350 treemanifest
350 treemanifest
351
351
352 Pushing to an empty repo works
352 Pushing to an empty repo works
353
353
354 $ hg --config experimental.treemanifest=1 init clone
354 $ hg --config experimental.treemanifest=1 init clone
355 $ grep treemanifest clone/.hg/requires
355 $ grep treemanifest clone/.hg/requires
356 treemanifest
356 treemanifest
357 $ hg push -R repo clone
357 $ hg push -R repo clone
358 pushing to clone
358 pushing to clone
359 searching for changes
359 searching for changes
360 adding changesets
360 adding changesets
361 adding manifests
361 adding manifests
362 adding file changes
362 adding file changes
363 added 11 changesets with 15 changes to 10 files (+3 heads)
363 added 11 changesets with 15 changes to 10 files (+3 heads)
364 $ grep treemanifest clone/.hg/requires
364 $ grep treemanifest clone/.hg/requires
365 treemanifest
365 treemanifest
366 $ hg -R clone verify
367 checking changesets
368 checking manifests
369 checking directory manifests
370 crosschecking files in changesets and manifests
371 checking files
372 10 files, 11 changesets, 15 total revisions
366
373
367 Create deeper repo with tree manifests.
374 Create deeper repo with tree manifests.
368
375
369 $ hg --config experimental.treemanifest=True init deeprepo
376 $ hg --config experimental.treemanifest=True init deeprepo
370 $ cd deeprepo
377 $ cd deeprepo
371
378
372 $ mkdir .A
379 $ mkdir .A
373 $ mkdir b
380 $ mkdir b
374 $ mkdir b/bar
381 $ mkdir b/bar
375 $ mkdir b/bar/orange
382 $ mkdir b/bar/orange
376 $ mkdir b/bar/orange/fly
383 $ mkdir b/bar/orange/fly
377 $ mkdir b/foo
384 $ mkdir b/foo
378 $ mkdir b/foo/apple
385 $ mkdir b/foo/apple
379 $ mkdir b/foo/apple/bees
386 $ mkdir b/foo/apple/bees
380
387
381 $ touch .A/one.txt
388 $ touch .A/one.txt
382 $ touch .A/two.txt
389 $ touch .A/two.txt
383 $ touch b/bar/fruits.txt
390 $ touch b/bar/fruits.txt
384 $ touch b/bar/orange/fly/gnat.py
391 $ touch b/bar/orange/fly/gnat.py
385 $ touch b/bar/orange/fly/housefly.txt
392 $ touch b/bar/orange/fly/housefly.txt
386 $ touch b/foo/apple/bees/flower.py
393 $ touch b/foo/apple/bees/flower.py
387 $ touch c.txt
394 $ touch c.txt
388 $ touch d.py
395 $ touch d.py
389
396
390 $ hg ci -Aqm 'initial'
397 $ hg ci -Aqm 'initial'
391
398
392 We'll see that visitdir works by removing some treemanifest revlogs and running
399 We'll see that visitdir works by removing some treemanifest revlogs and running
393 the files command with various parameters.
400 the files command with various parameters.
394
401
395 Test files from the root.
402 Test files from the root.
396
403
397 $ hg files -r .
404 $ hg files -r .
398 .A/one.txt (glob)
405 .A/one.txt (glob)
399 .A/two.txt (glob)
406 .A/two.txt (glob)
400 b/bar/fruits.txt (glob)
407 b/bar/fruits.txt (glob)
401 b/bar/orange/fly/gnat.py (glob)
408 b/bar/orange/fly/gnat.py (glob)
402 b/bar/orange/fly/housefly.txt (glob)
409 b/bar/orange/fly/housefly.txt (glob)
403 b/foo/apple/bees/flower.py (glob)
410 b/foo/apple/bees/flower.py (glob)
404 c.txt
411 c.txt
405 d.py
412 d.py
406
413
407 Excludes with a glob should not exclude everything from the glob's root
414 Excludes with a glob should not exclude everything from the glob's root
408
415
409 $ hg files -r . -X 'b/fo?' b
416 $ hg files -r . -X 'b/fo?' b
410 b/bar/fruits.txt (glob)
417 b/bar/fruits.txt (glob)
411 b/bar/orange/fly/gnat.py (glob)
418 b/bar/orange/fly/gnat.py (glob)
412 b/bar/orange/fly/housefly.txt (glob)
419 b/bar/orange/fly/housefly.txt (glob)
413 $ cp -r .hg/store .hg/store-copy
420 $ cp -r .hg/store .hg/store-copy
414
421
415 Test files for a subdirectory.
422 Test files for a subdirectory.
416
423
417 $ rm -r .hg/store/meta/~2e_a
424 $ rm -r .hg/store/meta/~2e_a
418 $ hg files -r . b
425 $ hg files -r . b
419 b/bar/fruits.txt (glob)
426 b/bar/fruits.txt (glob)
420 b/bar/orange/fly/gnat.py (glob)
427 b/bar/orange/fly/gnat.py (glob)
421 b/bar/orange/fly/housefly.txt (glob)
428 b/bar/orange/fly/housefly.txt (glob)
422 b/foo/apple/bees/flower.py (glob)
429 b/foo/apple/bees/flower.py (glob)
423 $ cp -r .hg/store-copy/. .hg/store
430 $ cp -r .hg/store-copy/. .hg/store
424
431
425 Test files with just includes and excludes.
432 Test files with just includes and excludes.
426
433
427 $ rm -r .hg/store/meta/~2e_a
434 $ rm -r .hg/store/meta/~2e_a
428 $ rm -r .hg/store/meta/b/bar/orange/fly
435 $ rm -r .hg/store/meta/b/bar/orange/fly
429 $ rm -r .hg/store/meta/b/foo/apple/bees
436 $ rm -r .hg/store/meta/b/foo/apple/bees
430 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
437 $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
431 b/bar/fruits.txt (glob)
438 b/bar/fruits.txt (glob)
432 $ cp -r .hg/store-copy/. .hg/store
439 $ cp -r .hg/store-copy/. .hg/store
433
440
434 Test files for a subdirectory, excluding a directory within it.
441 Test files for a subdirectory, excluding a directory within it.
435
442
436 $ rm -r .hg/store/meta/~2e_a
443 $ rm -r .hg/store/meta/~2e_a
437 $ rm -r .hg/store/meta/b/foo
444 $ rm -r .hg/store/meta/b/foo
438 $ hg files -r . -X path:b/foo b
445 $ hg files -r . -X path:b/foo b
439 b/bar/fruits.txt (glob)
446 b/bar/fruits.txt (glob)
440 b/bar/orange/fly/gnat.py (glob)
447 b/bar/orange/fly/gnat.py (glob)
441 b/bar/orange/fly/housefly.txt (glob)
448 b/bar/orange/fly/housefly.txt (glob)
442 $ cp -r .hg/store-copy/. .hg/store
449 $ cp -r .hg/store-copy/. .hg/store
443
450
444 Test files for a sub directory, including only a directory within it, and
451 Test files for a sub directory, including only a directory within it, and
445 including an unrelated directory.
452 including an unrelated directory.
446
453
447 $ rm -r .hg/store/meta/~2e_a
454 $ rm -r .hg/store/meta/~2e_a
448 $ rm -r .hg/store/meta/b/foo
455 $ rm -r .hg/store/meta/b/foo
449 $ hg files -r . -I path:b/bar/orange -I path:a b
456 $ hg files -r . -I path:b/bar/orange -I path:a b
450 b/bar/orange/fly/gnat.py (glob)
457 b/bar/orange/fly/gnat.py (glob)
451 b/bar/orange/fly/housefly.txt (glob)
458 b/bar/orange/fly/housefly.txt (glob)
452 $ cp -r .hg/store-copy/. .hg/store
459 $ cp -r .hg/store-copy/. .hg/store
453
460
454 Test files for a pattern, including a directory, and excluding a directory
461 Test files for a pattern, including a directory, and excluding a directory
455 within that.
462 within that.
456
463
457 $ rm -r .hg/store/meta/~2e_a
464 $ rm -r .hg/store/meta/~2e_a
458 $ rm -r .hg/store/meta/b/foo
465 $ rm -r .hg/store/meta/b/foo
459 $ rm -r .hg/store/meta/b/bar/orange
466 $ rm -r .hg/store/meta/b/bar/orange
460 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
467 $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
461 b/bar/fruits.txt (glob)
468 b/bar/fruits.txt (glob)
462 $ cp -r .hg/store-copy/. .hg/store
469 $ cp -r .hg/store-copy/. .hg/store
463
470
464 Add some more changes to the deep repo
471 Add some more changes to the deep repo
465 $ echo narf >> b/bar/fruits.txt
472 $ echo narf >> b/bar/fruits.txt
466 $ hg ci -m narf
473 $ hg ci -m narf
467 $ echo troz >> b/bar/orange/fly/gnat.py
474 $ echo troz >> b/bar/orange/fly/gnat.py
468 $ hg ci -m troz
475 $ hg ci -m troz
469
476
470 Verify works
477 Verify works
471 $ hg verify
478 $ hg verify
472 checking changesets
479 checking changesets
473 checking manifests
480 checking manifests
474 checking directory manifests
481 checking directory manifests
475 crosschecking files in changesets and manifests
482 crosschecking files in changesets and manifests
476 checking files
483 checking files
477 8 files, 3 changesets, 10 total revisions
484 8 files, 3 changesets, 10 total revisions
478
485
479 Dirlogs are included in fncache
486 Dirlogs are included in fncache
480 $ grep meta/.A/00manifest.i .hg/store/fncache
487 $ grep meta/.A/00manifest.i .hg/store/fncache
481 meta/.A/00manifest.i
488 meta/.A/00manifest.i
482
489
483 Rebuilt fncache includes dirlogs
490 Rebuilt fncache includes dirlogs
484 $ rm .hg/store/fncache
491 $ rm .hg/store/fncache
485 $ hg debugrebuildfncache
492 $ hg debugrebuildfncache
486 adding data/.A/one.txt.i
493 adding data/.A/one.txt.i
487 adding data/.A/two.txt.i
494 adding data/.A/two.txt.i
488 adding data/b/bar/fruits.txt.i
495 adding data/b/bar/fruits.txt.i
489 adding data/b/bar/orange/fly/gnat.py.i
496 adding data/b/bar/orange/fly/gnat.py.i
490 adding data/b/bar/orange/fly/housefly.txt.i
497 adding data/b/bar/orange/fly/housefly.txt.i
491 adding data/b/foo/apple/bees/flower.py.i
498 adding data/b/foo/apple/bees/flower.py.i
492 adding data/c.txt.i
499 adding data/c.txt.i
493 adding data/d.py.i
500 adding data/d.py.i
494 adding meta/.A/00manifest.i
501 adding meta/.A/00manifest.i
495 adding meta/b/00manifest.i
502 adding meta/b/00manifest.i
496 adding meta/b/bar/00manifest.i
503 adding meta/b/bar/00manifest.i
497 adding meta/b/bar/orange/00manifest.i
504 adding meta/b/bar/orange/00manifest.i
498 adding meta/b/bar/orange/fly/00manifest.i
505 adding meta/b/bar/orange/fly/00manifest.i
499 adding meta/b/foo/00manifest.i
506 adding meta/b/foo/00manifest.i
500 adding meta/b/foo/apple/00manifest.i
507 adding meta/b/foo/apple/00manifest.i
501 adding meta/b/foo/apple/bees/00manifest.i
508 adding meta/b/foo/apple/bees/00manifest.i
502 16 items added, 0 removed from fncache
509 16 items added, 0 removed from fncache
503
510
504 Finish first server
511 Finish first server
505 $ killdaemons.py
512 $ killdaemons.py
506
513
507 Back up the recently added revlogs
514 Back up the recently added revlogs
508 $ cp -r .hg/store .hg/store-newcopy
515 $ cp -r .hg/store .hg/store-newcopy
509
516
510 Verify reports missing dirlog
517 Verify reports missing dirlog
511 $ rm .hg/store/meta/b/00manifest.*
518 $ rm .hg/store/meta/b/00manifest.*
512 $ hg verify
519 $ hg verify
513 checking changesets
520 checking changesets
514 checking manifests
521 checking manifests
515 checking directory manifests
522 checking directory manifests
516 0: empty or missing b/
523 0: empty or missing b/
517 b/@0: parent-directory manifest refers to unknown revision 67688a370455
524 b/@0: parent-directory manifest refers to unknown revision 67688a370455
518 b/@1: parent-directory manifest refers to unknown revision f38e85d334c5
525 b/@1: parent-directory manifest refers to unknown revision f38e85d334c5
519 b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0
526 b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0
520 warning: orphan revlog 'meta/b/bar/00manifest.i'
527 warning: orphan revlog 'meta/b/bar/00manifest.i'
521 warning: orphan revlog 'meta/b/bar/orange/00manifest.i'
528 warning: orphan revlog 'meta/b/bar/orange/00manifest.i'
522 warning: orphan revlog 'meta/b/bar/orange/fly/00manifest.i'
529 warning: orphan revlog 'meta/b/bar/orange/fly/00manifest.i'
523 warning: orphan revlog 'meta/b/foo/00manifest.i'
530 warning: orphan revlog 'meta/b/foo/00manifest.i'
524 warning: orphan revlog 'meta/b/foo/apple/00manifest.i'
531 warning: orphan revlog 'meta/b/foo/apple/00manifest.i'
525 warning: orphan revlog 'meta/b/foo/apple/bees/00manifest.i'
532 warning: orphan revlog 'meta/b/foo/apple/bees/00manifest.i'
526 crosschecking files in changesets and manifests
533 crosschecking files in changesets and manifests
527 b/bar/fruits.txt@0: in changeset but not in manifest
534 b/bar/fruits.txt@0: in changeset but not in manifest
528 b/bar/orange/fly/gnat.py@0: in changeset but not in manifest
535 b/bar/orange/fly/gnat.py@0: in changeset but not in manifest
529 b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
536 b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest
530 b/foo/apple/bees/flower.py@0: in changeset but not in manifest
537 b/foo/apple/bees/flower.py@0: in changeset but not in manifest
531 checking files
538 checking files
532 8 files, 3 changesets, 10 total revisions
539 8 files, 3 changesets, 10 total revisions
533 6 warnings encountered!
540 6 warnings encountered!
534 8 integrity errors encountered!
541 8 integrity errors encountered!
535 (first damaged changeset appears to be 0)
542 (first damaged changeset appears to be 0)
536 [1]
543 [1]
537 $ cp -rT .hg/store-newcopy .hg/store
544 $ cp -rT .hg/store-newcopy .hg/store
538
545
539 Verify reports missing dirlog entry
546 Verify reports missing dirlog entry
540 $ mv -f .hg/store-copy/meta/b/00manifest.* .hg/store/meta/b/
547 $ mv -f .hg/store-copy/meta/b/00manifest.* .hg/store/meta/b/
541 $ hg verify
548 $ hg verify
542 checking changesets
549 checking changesets
543 checking manifests
550 checking manifests
544 checking directory manifests
551 checking directory manifests
545 b/@1: parent-directory manifest refers to unknown revision f38e85d334c5
552 b/@1: parent-directory manifest refers to unknown revision f38e85d334c5
546 b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0
553 b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0
547 b/bar/@?: rev 1 points to unexpected changeset 1
554 b/bar/@?: rev 1 points to unexpected changeset 1
548 b/bar/@?: 5e03c4ee5e4a not in parent-directory manifest
555 b/bar/@?: 5e03c4ee5e4a not in parent-directory manifest
549 b/bar/@?: rev 2 points to unexpected changeset 2
556 b/bar/@?: rev 2 points to unexpected changeset 2
550 b/bar/@?: 1b16940d66d6 not in parent-directory manifest
557 b/bar/@?: 1b16940d66d6 not in parent-directory manifest
551 b/bar/orange/@?: rev 1 points to unexpected changeset 2
558 b/bar/orange/@?: rev 1 points to unexpected changeset 2
552 (expected None)
559 (expected None)
553 b/bar/orange/fly/@?: rev 1 points to unexpected changeset 2
560 b/bar/orange/fly/@?: rev 1 points to unexpected changeset 2
554 (expected None)
561 (expected None)
555 crosschecking files in changesets and manifests
562 crosschecking files in changesets and manifests
556 checking files
563 checking files
557 8 files, 3 changesets, 10 total revisions
564 8 files, 3 changesets, 10 total revisions
558 2 warnings encountered!
565 2 warnings encountered!
559 8 integrity errors encountered!
566 8 integrity errors encountered!
560 (first damaged changeset appears to be 1)
567 (first damaged changeset appears to be 1)
561 [1]
568 [1]
562 $ cp -rT .hg/store-newcopy .hg/store
569 $ cp -rT .hg/store-newcopy .hg/store
563
570
564 Test cloning a treemanifest repo over http.
571 Test cloning a treemanifest repo over http.
565 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
572 $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
566 $ cat hg.pid >> $DAEMON_PIDS
573 $ cat hg.pid >> $DAEMON_PIDS
567 $ cd ..
574 $ cd ..
568 We can clone even with the knob turned off and we'll get a treemanifest repo.
575 We can clone even with the knob turned off and we'll get a treemanifest repo.
569 $ hg clone --config experimental.treemanifest=False \
576 $ hg clone --config experimental.treemanifest=False \
570 > --config experimental.changegroup3=True \
577 > --config experimental.changegroup3=True \
571 > http://localhost:$HGPORT deepclone
578 > http://localhost:$HGPORT deepclone
572 requesting all changes
579 requesting all changes
573 adding changesets
580 adding changesets
574 adding manifests
581 adding manifests
575 adding file changes
582 adding file changes
576 added 3 changesets with 10 changes to 8 files
583 added 3 changesets with 10 changes to 8 files
577 updating to branch default
584 updating to branch default
578 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
585 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
579 No server errors.
586 No server errors.
580 $ cat deeprepo/errors.log
587 $ cat deeprepo/errors.log
581 requires got updated to include treemanifest
588 requires got updated to include treemanifest
582 $ cat deepclone/.hg/requires | grep treemanifest
589 $ cat deepclone/.hg/requires | grep treemanifest
583 treemanifest
590 treemanifest
584 Tree manifest revlogs exist.
591 Tree manifest revlogs exist.
585 $ find deepclone/.hg/store/meta | sort
592 $ find deepclone/.hg/store/meta | sort
586 deepclone/.hg/store/meta
593 deepclone/.hg/store/meta
587 deepclone/.hg/store/meta/b
594 deepclone/.hg/store/meta/b
588 deepclone/.hg/store/meta/b/00manifest.i
595 deepclone/.hg/store/meta/b/00manifest.i
589 deepclone/.hg/store/meta/b/bar
596 deepclone/.hg/store/meta/b/bar
590 deepclone/.hg/store/meta/b/bar/00manifest.i
597 deepclone/.hg/store/meta/b/bar/00manifest.i
591 deepclone/.hg/store/meta/b/bar/orange
598 deepclone/.hg/store/meta/b/bar/orange
592 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
599 deepclone/.hg/store/meta/b/bar/orange/00manifest.i
593 deepclone/.hg/store/meta/b/bar/orange/fly
600 deepclone/.hg/store/meta/b/bar/orange/fly
594 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
601 deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
595 deepclone/.hg/store/meta/b/foo
602 deepclone/.hg/store/meta/b/foo
596 deepclone/.hg/store/meta/b/foo/00manifest.i
603 deepclone/.hg/store/meta/b/foo/00manifest.i
597 deepclone/.hg/store/meta/b/foo/apple
604 deepclone/.hg/store/meta/b/foo/apple
598 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
605 deepclone/.hg/store/meta/b/foo/apple/00manifest.i
599 deepclone/.hg/store/meta/b/foo/apple/bees
606 deepclone/.hg/store/meta/b/foo/apple/bees
600 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
607 deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
601 deepclone/.hg/store/meta/~2e_a
608 deepclone/.hg/store/meta/~2e_a
602 deepclone/.hg/store/meta/~2e_a/00manifest.i
609 deepclone/.hg/store/meta/~2e_a/00manifest.i
603 Verify passes.
610 Verify passes.
604 $ cd deepclone
611 $ cd deepclone
605 $ hg verify
612 $ hg verify
606 checking changesets
613 checking changesets
607 checking manifests
614 checking manifests
608 checking directory manifests
615 checking directory manifests
609 crosschecking files in changesets and manifests
616 crosschecking files in changesets and manifests
610 checking files
617 checking files
611 8 files, 3 changesets, 10 total revisions
618 8 files, 3 changesets, 10 total revisions
612 $ cd ..
619 $ cd ..
613
620
614 Create clones using old repo formats to use in later tests
621 Create clones using old repo formats to use in later tests
615 $ hg clone --config format.usestore=False \
622 $ hg clone --config format.usestore=False \
616 > --config experimental.changegroup3=True \
623 > --config experimental.changegroup3=True \
617 > http://localhost:$HGPORT deeprepo-basicstore
624 > http://localhost:$HGPORT deeprepo-basicstore
618 requesting all changes
625 requesting all changes
619 adding changesets
626 adding changesets
620 adding manifests
627 adding manifests
621 adding file changes
628 adding file changes
622 added 3 changesets with 10 changes to 8 files
629 added 3 changesets with 10 changes to 8 files
623 updating to branch default
630 updating to branch default
624 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
631 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
625 $ cd deeprepo-basicstore
632 $ cd deeprepo-basicstore
626 $ grep store .hg/requires
633 $ grep store .hg/requires
627 [1]
634 [1]
628 $ hg serve -p $HGPORT1 -d --pid-file=hg.pid --errorlog=errors.log
635 $ hg serve -p $HGPORT1 -d --pid-file=hg.pid --errorlog=errors.log
629 $ cat hg.pid >> $DAEMON_PIDS
636 $ cat hg.pid >> $DAEMON_PIDS
630 $ cd ..
637 $ cd ..
631 $ hg clone --config format.usefncache=False \
638 $ hg clone --config format.usefncache=False \
632 > --config experimental.changegroup3=True \
639 > --config experimental.changegroup3=True \
633 > http://localhost:$HGPORT deeprepo-encodedstore
640 > http://localhost:$HGPORT deeprepo-encodedstore
634 requesting all changes
641 requesting all changes
635 adding changesets
642 adding changesets
636 adding manifests
643 adding manifests
637 adding file changes
644 adding file changes
638 added 3 changesets with 10 changes to 8 files
645 added 3 changesets with 10 changes to 8 files
639 updating to branch default
646 updating to branch default
640 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
647 8 files updated, 0 files merged, 0 files removed, 0 files unresolved
641 $ cd deeprepo-encodedstore
648 $ cd deeprepo-encodedstore
642 $ grep fncache .hg/requires
649 $ grep fncache .hg/requires
643 [1]
650 [1]
644 $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log
651 $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log
645 $ cat hg.pid >> $DAEMON_PIDS
652 $ cat hg.pid >> $DAEMON_PIDS
646 $ cd ..
653 $ cd ..
647
654
648 Local clone with basicstore
655 Local clone with basicstore
649 $ hg clone -U deeprepo-basicstore local-clone-basicstore
656 $ hg clone -U deeprepo-basicstore local-clone-basicstore
650 $ hg -R local-clone-basicstore verify
657 $ hg -R local-clone-basicstore verify
651 checking changesets
658 checking changesets
652 checking manifests
659 checking manifests
653 checking directory manifests
660 checking directory manifests
654 crosschecking files in changesets and manifests
661 crosschecking files in changesets and manifests
655 checking files
662 checking files
656 8 files, 3 changesets, 10 total revisions
663 8 files, 3 changesets, 10 total revisions
657
664
658 Local clone with encodedstore
665 Local clone with encodedstore
659 $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
666 $ hg clone -U deeprepo-encodedstore local-clone-encodedstore
660 $ hg -R local-clone-encodedstore verify
667 $ hg -R local-clone-encodedstore verify
661 checking changesets
668 checking changesets
662 checking manifests
669 checking manifests
663 checking directory manifests
670 checking directory manifests
664 crosschecking files in changesets and manifests
671 crosschecking files in changesets and manifests
665 checking files
672 checking files
666 8 files, 3 changesets, 10 total revisions
673 8 files, 3 changesets, 10 total revisions
667
674
668 Local clone with fncachestore
675 Local clone with fncachestore
669 $ hg clone -U deeprepo local-clone-fncachestore
676 $ hg clone -U deeprepo local-clone-fncachestore
670 $ hg -R local-clone-fncachestore verify
677 $ hg -R local-clone-fncachestore verify
671 checking changesets
678 checking changesets
672 checking manifests
679 checking manifests
673 checking directory manifests
680 checking directory manifests
674 crosschecking files in changesets and manifests
681 crosschecking files in changesets and manifests
675 checking files
682 checking files
676 8 files, 3 changesets, 10 total revisions
683 8 files, 3 changesets, 10 total revisions
677
684
678 Stream clone with basicstore
685 Stream clone with basicstore
679 $ hg clone --config experimental.changegroup3=True --uncompressed -U \
686 $ hg clone --config experimental.changegroup3=True --uncompressed -U \
680 > http://localhost:$HGPORT1 stream-clone-basicstore
687 > http://localhost:$HGPORT1 stream-clone-basicstore
681 streaming all changes
688 streaming all changes
682 18 files to transfer, * of data (glob)
689 18 files to transfer, * of data (glob)
683 transferred * in * seconds (*) (glob)
690 transferred * in * seconds (*) (glob)
684 searching for changes
691 searching for changes
685 no changes found
692 no changes found
686 $ hg -R stream-clone-basicstore verify
693 $ hg -R stream-clone-basicstore verify
687 checking changesets
694 checking changesets
688 checking manifests
695 checking manifests
689 checking directory manifests
696 checking directory manifests
690 crosschecking files in changesets and manifests
697 crosschecking files in changesets and manifests
691 checking files
698 checking files
692 8 files, 3 changesets, 10 total revisions
699 8 files, 3 changesets, 10 total revisions
693
700
694 Stream clone with encodedstore
701 Stream clone with encodedstore
695 $ hg clone --config experimental.changegroup3=True --uncompressed -U \
702 $ hg clone --config experimental.changegroup3=True --uncompressed -U \
696 > http://localhost:$HGPORT2 stream-clone-encodedstore
703 > http://localhost:$HGPORT2 stream-clone-encodedstore
697 streaming all changes
704 streaming all changes
698 18 files to transfer, * of data (glob)
705 18 files to transfer, * of data (glob)
699 transferred * in * seconds (*) (glob)
706 transferred * in * seconds (*) (glob)
700 searching for changes
707 searching for changes
701 no changes found
708 no changes found
702 $ hg -R stream-clone-encodedstore verify
709 $ hg -R stream-clone-encodedstore verify
703 checking changesets
710 checking changesets
704 checking manifests
711 checking manifests
705 checking directory manifests
712 checking directory manifests
706 crosschecking files in changesets and manifests
713 crosschecking files in changesets and manifests
707 checking files
714 checking files
708 8 files, 3 changesets, 10 total revisions
715 8 files, 3 changesets, 10 total revisions
709
716
710 Stream clone with fncachestore
717 Stream clone with fncachestore
711 $ hg clone --config experimental.changegroup3=True --uncompressed -U \
718 $ hg clone --config experimental.changegroup3=True --uncompressed -U \
712 > http://localhost:$HGPORT stream-clone-fncachestore
719 > http://localhost:$HGPORT stream-clone-fncachestore
713 streaming all changes
720 streaming all changes
714 18 files to transfer, * of data (glob)
721 18 files to transfer, * of data (glob)
715 transferred * in * seconds (*) (glob)
722 transferred * in * seconds (*) (glob)
716 searching for changes
723 searching for changes
717 no changes found
724 no changes found
718 $ hg -R stream-clone-fncachestore verify
725 $ hg -R stream-clone-fncachestore verify
719 checking changesets
726 checking changesets
720 checking manifests
727 checking manifests
721 checking directory manifests
728 checking directory manifests
722 crosschecking files in changesets and manifests
729 crosschecking files in changesets and manifests
723 checking files
730 checking files
724 8 files, 3 changesets, 10 total revisions
731 8 files, 3 changesets, 10 total revisions
725
732
726 Packed bundle
733 Packed bundle
727 $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
734 $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
728 writing 3349 bytes for 18 files
735 writing 3349 bytes for 18 files
729 bundle requirements: generaldelta, revlogv1, treemanifest
736 bundle requirements: generaldelta, revlogv1, treemanifest
730 $ hg debugbundle --spec repo-packed.hg
737 $ hg debugbundle --spec repo-packed.hg
731 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Ctreemanifest
738 none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Ctreemanifest
General Comments 0
You need to be logged in to leave comments. Login now