bundlerepo: build revlog index with flags...
Jun Wu
r31835:4bafc80f default
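The first hunk changes bundlerevlog.__init__ so that the revision flags carried in each changegroup delta chunk (chunkdata['flags']) are packed into the in-memory index entry through revlog.offset_type, where the flags argument was previously hard-coded to 0. Flag-processed revisions (such as the base64/gzip/noop test processors exercised in the second hunk) therefore keep their flags when read back out of a bundle repository. Below is a minimal, self-contained sketch of that index construction: offset_type mirrors the packing done by mercurial.revlog.offset_type (offset shifted left 16 bits, OR-ed with the flag bits), while chunkdata, rev_of and linkmapper are simplified stand-ins for the real changegroup unbundler and changelog lookups used in the diff.

    # Sketch of the index construction changed in this revision (stand-ins,
    # not the real mercurial objects).

    def offset_type(offset, type):
        # pack the data offset and the flag bits into one index field
        return int(offset) << 16 | type

    def build_index_entry(chunkdata, start, rev_of, linkmapper):
        """Build one revlog index tuple from a changegroup delta chunk."""
        delta = chunkdata['delta']
        size = len(delta)
        baserev = rev_of(chunkdata['deltabase'])
        link = linkmapper(chunkdata['cs'])
        flags = chunkdata['flags']          # new: carried over instead of 0
        # start+flags, size, uncompressed size (unknown: -1), base, link, p1, p2, node
        return (offset_type(start, flags), size, -1, baserev, link,
                rev_of(chunkdata['p1']), rev_of(chunkdata['p2']),
                chunkdata['node'])

    # toy usage with fake 20-byte node ids and a fixed linkrev
    nodes = {b'\x11' * 20: 0, b'\x22' * 20: 1}
    chunk = {'node': b'\x33' * 20, 'p1': b'\x11' * 20, 'p2': b'\x22' * 20,
             'cs': b'\x33' * 20, 'deltabase': b'\x11' * 20,
             'delta': b'example delta', 'flags': 1 << 15}
    entry = build_index_entry(chunk, start=128, rev_of=nodes.get,
                              linkmapper=lambda cs: 2)
    assert entry[0] >> 16 == 128 and entry[0] & 0xffff == 1 << 15

Before this change the old line 90 below built the same tuple with offset_type(start, 0), which silently dropped any processor flags set on the incoming revision.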
@@ -1,556 +1,557 @@
1 # bundlerepo.py - repository class for viewing uncompressed bundles
1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 #
2 #
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Repository class for viewing uncompressed bundles.
8 """Repository class for viewing uncompressed bundles.
9
9
10 This provides a read-only repository interface to bundles as if they
10 This provides a read-only repository interface to bundles as if they
11 were part of the actual repository.
11 were part of the actual repository.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import os
16 import os
17 import shutil
17 import shutil
18 import tempfile
18 import tempfile
19
19
20 from .i18n import _
20 from .i18n import _
21 from .node import nullid
21 from .node import nullid
22
22
23 from . import (
23 from . import (
24 bundle2,
24 bundle2,
25 changegroup,
25 changegroup,
26 changelog,
26 changelog,
27 cmdutil,
27 cmdutil,
28 discovery,
28 discovery,
29 error,
29 error,
30 exchange,
30 exchange,
31 filelog,
31 filelog,
32 localrepo,
32 localrepo,
33 manifest,
33 manifest,
34 mdiff,
34 mdiff,
35 node as nodemod,
35 node as nodemod,
36 pathutil,
36 pathutil,
37 phases,
37 phases,
38 pycompat,
38 pycompat,
39 revlog,
39 revlog,
40 util,
40 util,
41 vfs as vfsmod,
41 vfs as vfsmod,
42 )
42 )
43
43
44 class bundlerevlog(revlog.revlog):
44 class bundlerevlog(revlog.revlog):
45 def __init__(self, opener, indexfile, bundle, linkmapper):
45 def __init__(self, opener, indexfile, bundle, linkmapper):
46 # How it works:
46 # How it works:
47 # To retrieve a revision, we need to know the offset of the revision in
47 # To retrieve a revision, we need to know the offset of the revision in
48 # the bundle (an unbundle object). We store this offset in the index
48 # the bundle (an unbundle object). We store this offset in the index
49 # (start). The base of the delta is stored in the base field.
49 # (start). The base of the delta is stored in the base field.
50 #
50 #
51 # To differentiate a rev in the bundle from a rev in the revlog, we
51 # To differentiate a rev in the bundle from a rev in the revlog, we
52 # check revision against repotiprev.
52 # check revision against repotiprev.
53 opener = vfsmod.readonlyvfs(opener)
53 opener = vfsmod.readonlyvfs(opener)
54 revlog.revlog.__init__(self, opener, indexfile)
54 revlog.revlog.__init__(self, opener, indexfile)
55 self.bundle = bundle
55 self.bundle = bundle
56 n = len(self)
56 n = len(self)
57 self.repotiprev = n - 1
57 self.repotiprev = n - 1
58 chain = None
58 chain = None
59 self.bundlerevs = set() # used by 'bundle()' revset expression
59 self.bundlerevs = set() # used by 'bundle()' revset expression
60 getchunk = lambda: bundle.deltachunk(chain)
60 getchunk = lambda: bundle.deltachunk(chain)
61 for chunkdata in iter(getchunk, {}):
61 for chunkdata in iter(getchunk, {}):
62 node = chunkdata['node']
62 node = chunkdata['node']
63 p1 = chunkdata['p1']
63 p1 = chunkdata['p1']
64 p2 = chunkdata['p2']
64 p2 = chunkdata['p2']
65 cs = chunkdata['cs']
65 cs = chunkdata['cs']
66 deltabase = chunkdata['deltabase']
66 deltabase = chunkdata['deltabase']
67 delta = chunkdata['delta']
67 delta = chunkdata['delta']
68 flags = chunkdata['flags']
68
69
69 size = len(delta)
70 size = len(delta)
70 start = bundle.tell() - size
71 start = bundle.tell() - size
71
72
72 link = linkmapper(cs)
73 link = linkmapper(cs)
73 if node in self.nodemap:
74 if node in self.nodemap:
74 # this can happen if two branches make the same change
75 # this can happen if two branches make the same change
75 chain = node
76 chain = node
76 self.bundlerevs.add(self.nodemap[node])
77 self.bundlerevs.add(self.nodemap[node])
77 continue
78 continue
78
79
79 for p in (p1, p2):
80 for p in (p1, p2):
80 if p not in self.nodemap:
81 if p not in self.nodemap:
81 raise error.LookupError(p, self.indexfile,
82 raise error.LookupError(p, self.indexfile,
82 _("unknown parent"))
83 _("unknown parent"))
83
84
84 if deltabase not in self.nodemap:
85 if deltabase not in self.nodemap:
85 raise LookupError(deltabase, self.indexfile,
86 raise LookupError(deltabase, self.indexfile,
86 _('unknown delta base'))
87 _('unknown delta base'))
87
88
88 baserev = self.rev(deltabase)
89 baserev = self.rev(deltabase)
89 # start, size, full unc. size, base (unused), link, p1, p2, node
90 # start, size, full unc. size, base (unused), link, p1, p2, node
90 e = (revlog.offset_type(start, 0), size, -1, baserev, link,
91 e = (revlog.offset_type(start, flags), size, -1, baserev, link,
91 self.rev(p1), self.rev(p2), node)
92 self.rev(p1), self.rev(p2), node)
92 self.index.insert(-1, e)
93 self.index.insert(-1, e)
93 self.nodemap[node] = n
94 self.nodemap[node] = n
94 self.bundlerevs.add(n)
95 self.bundlerevs.add(n)
95 chain = node
96 chain = node
96 n += 1
97 n += 1
97
98
98 def _chunk(self, rev):
99 def _chunk(self, rev):
99 # Warning: in case of bundle, the diff is against what we stored as
100 # Warning: in case of bundle, the diff is against what we stored as
100 # delta base, not against rev - 1
101 # delta base, not against rev - 1
101 # XXX: could use some caching
102 # XXX: could use some caching
102 if rev <= self.repotiprev:
103 if rev <= self.repotiprev:
103 return revlog.revlog._chunk(self, rev)
104 return revlog.revlog._chunk(self, rev)
104 self.bundle.seek(self.start(rev))
105 self.bundle.seek(self.start(rev))
105 return self.bundle.read(self.length(rev))
106 return self.bundle.read(self.length(rev))
106
107
107 def revdiff(self, rev1, rev2):
108 def revdiff(self, rev1, rev2):
108 """return or calculate a delta between two revisions"""
109 """return or calculate a delta between two revisions"""
109 if rev1 > self.repotiprev and rev2 > self.repotiprev:
110 if rev1 > self.repotiprev and rev2 > self.repotiprev:
110 # hot path for bundle
111 # hot path for bundle
111 revb = self.index[rev2][3]
112 revb = self.index[rev2][3]
112 if revb == rev1:
113 if revb == rev1:
113 return self._chunk(rev2)
114 return self._chunk(rev2)
114 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
115 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
115 return revlog.revlog.revdiff(self, rev1, rev2)
116 return revlog.revlog.revdiff(self, rev1, rev2)
116
117
117 return mdiff.textdiff(self.revision(rev1), self.revision(rev2))
118 return mdiff.textdiff(self.revision(rev1), self.revision(rev2))
118
119
119 def revision(self, nodeorrev, raw=False):
120 def revision(self, nodeorrev, raw=False):
120 """return an uncompressed revision of a given node or revision
121 """return an uncompressed revision of a given node or revision
121 number.
122 number.
122 """
123 """
123 if isinstance(nodeorrev, int):
124 if isinstance(nodeorrev, int):
124 rev = nodeorrev
125 rev = nodeorrev
125 node = self.node(rev)
126 node = self.node(rev)
126 else:
127 else:
127 node = nodeorrev
128 node = nodeorrev
128 rev = self.rev(node)
129 rev = self.rev(node)
129
130
130 if node == nullid:
131 if node == nullid:
131 return ""
132 return ""
132
133
133 text = None
134 text = None
134 chain = []
135 chain = []
135 iterrev = rev
136 iterrev = rev
136 # reconstruct the revision if it is from a changegroup
137 # reconstruct the revision if it is from a changegroup
137 while iterrev > self.repotiprev:
138 while iterrev > self.repotiprev:
138 if self._cache and self._cache[1] == iterrev:
139 if self._cache and self._cache[1] == iterrev:
139 text = self._cache[2]
140 text = self._cache[2]
140 break
141 break
141 chain.append(iterrev)
142 chain.append(iterrev)
142 iterrev = self.index[iterrev][3]
143 iterrev = self.index[iterrev][3]
143 if text is None:
144 if text is None:
144 text = self.baserevision(iterrev)
145 text = self.baserevision(iterrev)
145
146
146 while chain:
147 while chain:
147 delta = self._chunk(chain.pop())
148 delta = self._chunk(chain.pop())
148 text = mdiff.patches(text, [delta])
149 text = mdiff.patches(text, [delta])
149
150
150 text, validatehash = self._processflags(text, self.flags(rev),
151 text, validatehash = self._processflags(text, self.flags(rev),
151 'read', raw=raw)
152 'read', raw=raw)
152 if validatehash:
153 if validatehash:
153 self.checkhash(text, node, rev=rev)
154 self.checkhash(text, node, rev=rev)
154 self._cache = (node, rev, text)
155 self._cache = (node, rev, text)
155 return text
156 return text
156
157
157 def baserevision(self, nodeorrev):
158 def baserevision(self, nodeorrev):
158 # Revlog subclasses may override 'revision' method to modify format of
159 # Revlog subclasses may override 'revision' method to modify format of
159 # content retrieved from revlog. To use bundlerevlog with such class one
160 # content retrieved from revlog. To use bundlerevlog with such class one
160 # needs to override 'baserevision' and make more specific call here.
161 # needs to override 'baserevision' and make more specific call here.
161 return revlog.revlog.revision(self, nodeorrev, raw=True)
162 return revlog.revlog.revision(self, nodeorrev, raw=True)
162
163
163 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
164 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
164 raise NotImplementedError
165 raise NotImplementedError
165 def addgroup(self, revs, linkmapper, transaction):
166 def addgroup(self, revs, linkmapper, transaction):
166 raise NotImplementedError
167 raise NotImplementedError
167 def strip(self, rev, minlink):
168 def strip(self, rev, minlink):
168 raise NotImplementedError
169 raise NotImplementedError
169 def checksize(self):
170 def checksize(self):
170 raise NotImplementedError
171 raise NotImplementedError
171
172
172 class bundlechangelog(bundlerevlog, changelog.changelog):
173 class bundlechangelog(bundlerevlog, changelog.changelog):
173 def __init__(self, opener, bundle):
174 def __init__(self, opener, bundle):
174 changelog.changelog.__init__(self, opener)
175 changelog.changelog.__init__(self, opener)
175 linkmapper = lambda x: x
176 linkmapper = lambda x: x
176 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
177 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
177 linkmapper)
178 linkmapper)
178
179
179 def baserevision(self, nodeorrev):
180 def baserevision(self, nodeorrev):
180 # Although changelog doesn't override 'revision' method, some extensions
181 # Although changelog doesn't override 'revision' method, some extensions
181 # may replace this class with another that does. Same story with
182 # may replace this class with another that does. Same story with
182 # manifest and filelog classes.
183 # manifest and filelog classes.
183
184
184 # This bypasses filtering on changelog.node() and rev() because we need
185 # This bypasses filtering on changelog.node() and rev() because we need
185 # revision text of the bundle base even if it is hidden.
186 # revision text of the bundle base even if it is hidden.
186 oldfilter = self.filteredrevs
187 oldfilter = self.filteredrevs
187 try:
188 try:
188 self.filteredrevs = ()
189 self.filteredrevs = ()
189 return changelog.changelog.revision(self, nodeorrev, raw=True)
190 return changelog.changelog.revision(self, nodeorrev, raw=True)
190 finally:
191 finally:
191 self.filteredrevs = oldfilter
192 self.filteredrevs = oldfilter
192
193
193 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
194 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
194 def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
195 def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
195 manifest.manifestrevlog.__init__(self, opener, dir=dir)
196 manifest.manifestrevlog.__init__(self, opener, dir=dir)
196 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
197 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
197 linkmapper)
198 linkmapper)
198 if dirlogstarts is None:
199 if dirlogstarts is None:
199 dirlogstarts = {}
200 dirlogstarts = {}
200 if self.bundle.version == "03":
201 if self.bundle.version == "03":
201 dirlogstarts = _getfilestarts(self.bundle)
202 dirlogstarts = _getfilestarts(self.bundle)
202 self._dirlogstarts = dirlogstarts
203 self._dirlogstarts = dirlogstarts
203 self._linkmapper = linkmapper
204 self._linkmapper = linkmapper
204
205
205 def baserevision(self, nodeorrev):
206 def baserevision(self, nodeorrev):
206 node = nodeorrev
207 node = nodeorrev
207 if isinstance(node, int):
208 if isinstance(node, int):
208 node = self.node(node)
209 node = self.node(node)
209
210
210 if node in self.fulltextcache:
211 if node in self.fulltextcache:
211 result = '%s' % self.fulltextcache[node]
212 result = '%s' % self.fulltextcache[node]
212 else:
213 else:
213 result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
214 result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
214 return result
215 return result
215
216
216 def dirlog(self, d):
217 def dirlog(self, d):
217 if d in self._dirlogstarts:
218 if d in self._dirlogstarts:
218 self.bundle.seek(self._dirlogstarts[d])
219 self.bundle.seek(self._dirlogstarts[d])
219 return bundlemanifest(
220 return bundlemanifest(
220 self.opener, self.bundle, self._linkmapper,
221 self.opener, self.bundle, self._linkmapper,
221 self._dirlogstarts, dir=d)
222 self._dirlogstarts, dir=d)
222 return super(bundlemanifest, self).dirlog(d)
223 return super(bundlemanifest, self).dirlog(d)
223
224
224 class bundlefilelog(bundlerevlog, filelog.filelog):
225 class bundlefilelog(bundlerevlog, filelog.filelog):
225 def __init__(self, opener, path, bundle, linkmapper):
226 def __init__(self, opener, path, bundle, linkmapper):
226 filelog.filelog.__init__(self, opener, path)
227 filelog.filelog.__init__(self, opener, path)
227 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
228 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
228 linkmapper)
229 linkmapper)
229
230
230 def baserevision(self, nodeorrev):
231 def baserevision(self, nodeorrev):
231 return filelog.filelog.revision(self, nodeorrev, raw=True)
232 return filelog.filelog.revision(self, nodeorrev, raw=True)
232
233
233 class bundlepeer(localrepo.localpeer):
234 class bundlepeer(localrepo.localpeer):
234 def canpush(self):
235 def canpush(self):
235 return False
236 return False
236
237
237 class bundlephasecache(phases.phasecache):
238 class bundlephasecache(phases.phasecache):
238 def __init__(self, *args, **kwargs):
239 def __init__(self, *args, **kwargs):
239 super(bundlephasecache, self).__init__(*args, **kwargs)
240 super(bundlephasecache, self).__init__(*args, **kwargs)
240 if util.safehasattr(self, 'opener'):
241 if util.safehasattr(self, 'opener'):
241 self.opener = vfsmod.readonlyvfs(self.opener)
242 self.opener = vfsmod.readonlyvfs(self.opener)
242
243
243 def write(self):
244 def write(self):
244 raise NotImplementedError
245 raise NotImplementedError
245
246
246 def _write(self, fp):
247 def _write(self, fp):
247 raise NotImplementedError
248 raise NotImplementedError
248
249
249 def _updateroots(self, phase, newroots, tr):
250 def _updateroots(self, phase, newroots, tr):
250 self.phaseroots[phase] = newroots
251 self.phaseroots[phase] = newroots
251 self.invalidate()
252 self.invalidate()
252 self.dirty = True
253 self.dirty = True
253
254
254 def _getfilestarts(bundle):
255 def _getfilestarts(bundle):
255 bundlefilespos = {}
256 bundlefilespos = {}
256 for chunkdata in iter(bundle.filelogheader, {}):
257 for chunkdata in iter(bundle.filelogheader, {}):
257 fname = chunkdata['filename']
258 fname = chunkdata['filename']
258 bundlefilespos[fname] = bundle.tell()
259 bundlefilespos[fname] = bundle.tell()
259 for chunk in iter(lambda: bundle.deltachunk(None), {}):
260 for chunk in iter(lambda: bundle.deltachunk(None), {}):
260 pass
261 pass
261 return bundlefilespos
262 return bundlefilespos
262
263
263 class bundlerepository(localrepo.localrepository):
264 class bundlerepository(localrepo.localrepository):
264 def __init__(self, ui, path, bundlename):
265 def __init__(self, ui, path, bundlename):
265 def _writetempbundle(read, suffix, header=''):
266 def _writetempbundle(read, suffix, header=''):
266 """Write a temporary file to disk
267 """Write a temporary file to disk
267
268
268 This is closure because we need to make sure this tracked by
269 This is closure because we need to make sure this tracked by
269 self.tempfile for cleanup purposes."""
270 self.tempfile for cleanup purposes."""
270 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
271 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
271 suffix=".hg10un")
272 suffix=".hg10un")
272 self.tempfile = temp
273 self.tempfile = temp
273
274
274 with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
275 with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
275 fptemp.write(header)
276 fptemp.write(header)
276 while True:
277 while True:
277 chunk = read(2**18)
278 chunk = read(2**18)
278 if not chunk:
279 if not chunk:
279 break
280 break
280 fptemp.write(chunk)
281 fptemp.write(chunk)
281
282
282 return self.vfs.open(self.tempfile, mode="rb")
283 return self.vfs.open(self.tempfile, mode="rb")
283 self._tempparent = None
284 self._tempparent = None
284 try:
285 try:
285 localrepo.localrepository.__init__(self, ui, path)
286 localrepo.localrepository.__init__(self, ui, path)
286 except error.RepoError:
287 except error.RepoError:
287 self._tempparent = tempfile.mkdtemp()
288 self._tempparent = tempfile.mkdtemp()
288 localrepo.instance(ui, self._tempparent, 1)
289 localrepo.instance(ui, self._tempparent, 1)
289 localrepo.localrepository.__init__(self, ui, self._tempparent)
290 localrepo.localrepository.__init__(self, ui, self._tempparent)
290 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
291 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
291
292
292 if path:
293 if path:
293 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
294 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
294 else:
295 else:
295 self._url = 'bundle:' + bundlename
296 self._url = 'bundle:' + bundlename
296
297
297 self.tempfile = None
298 self.tempfile = None
298 f = util.posixfile(bundlename, "rb")
299 f = util.posixfile(bundlename, "rb")
299 self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
300 self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
300
301
301 if isinstance(self.bundle, bundle2.unbundle20):
302 if isinstance(self.bundle, bundle2.unbundle20):
302 cgstream = None
303 cgstream = None
303 for part in self.bundle.iterparts():
304 for part in self.bundle.iterparts():
304 if part.type == 'changegroup':
305 if part.type == 'changegroup':
305 if cgstream is not None:
306 if cgstream is not None:
306 raise NotImplementedError("can't process "
307 raise NotImplementedError("can't process "
307 "multiple changegroups")
308 "multiple changegroups")
308 cgstream = part
309 cgstream = part
309 version = part.params.get('version', '01')
310 version = part.params.get('version', '01')
310 legalcgvers = changegroup.supportedincomingversions(self)
311 legalcgvers = changegroup.supportedincomingversions(self)
311 if version not in legalcgvers:
312 if version not in legalcgvers:
312 msg = _('Unsupported changegroup version: %s')
313 msg = _('Unsupported changegroup version: %s')
313 raise error.Abort(msg % version)
314 raise error.Abort(msg % version)
314 if self.bundle.compressed():
315 if self.bundle.compressed():
315 cgstream = _writetempbundle(part.read,
316 cgstream = _writetempbundle(part.read,
316 ".cg%sun" % version)
317 ".cg%sun" % version)
317
318
318 if cgstream is None:
319 if cgstream is None:
319 raise error.Abort(_('No changegroups found'))
320 raise error.Abort(_('No changegroups found'))
320 cgstream.seek(0)
321 cgstream.seek(0)
321
322
322 self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
323 self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
323
324
324 elif self.bundle.compressed():
325 elif self.bundle.compressed():
325 f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
326 f = _writetempbundle(self.bundle.read, '.hg10un', header='HG10UN')
326 self.bundlefile = self.bundle = exchange.readbundle(ui, f,
327 self.bundlefile = self.bundle = exchange.readbundle(ui, f,
327 bundlename,
328 bundlename,
328 self.vfs)
329 self.vfs)
329
330
330 # dict with the mapping 'filename' -> position in the bundle
331 # dict with the mapping 'filename' -> position in the bundle
331 self.bundlefilespos = {}
332 self.bundlefilespos = {}
332
333
333 self.firstnewrev = self.changelog.repotiprev + 1
334 self.firstnewrev = self.changelog.repotiprev + 1
334 phases.retractboundary(self, None, phases.draft,
335 phases.retractboundary(self, None, phases.draft,
335 [ctx.node() for ctx in self[self.firstnewrev:]])
336 [ctx.node() for ctx in self[self.firstnewrev:]])
336
337
337 @localrepo.unfilteredpropertycache
338 @localrepo.unfilteredpropertycache
338 def _phasecache(self):
339 def _phasecache(self):
339 return bundlephasecache(self, self._phasedefaults)
340 return bundlephasecache(self, self._phasedefaults)
340
341
341 @localrepo.unfilteredpropertycache
342 @localrepo.unfilteredpropertycache
342 def changelog(self):
343 def changelog(self):
343 # consume the header if it exists
344 # consume the header if it exists
344 self.bundle.changelogheader()
345 self.bundle.changelogheader()
345 c = bundlechangelog(self.svfs, self.bundle)
346 c = bundlechangelog(self.svfs, self.bundle)
346 self.manstart = self.bundle.tell()
347 self.manstart = self.bundle.tell()
347 return c
348 return c
348
349
349 def _constructmanifest(self):
350 def _constructmanifest(self):
350 self.bundle.seek(self.manstart)
351 self.bundle.seek(self.manstart)
351 # consume the header if it exists
352 # consume the header if it exists
352 self.bundle.manifestheader()
353 self.bundle.manifestheader()
353 linkmapper = self.unfiltered().changelog.rev
354 linkmapper = self.unfiltered().changelog.rev
354 m = bundlemanifest(self.svfs, self.bundle, linkmapper)
355 m = bundlemanifest(self.svfs, self.bundle, linkmapper)
355 self.filestart = self.bundle.tell()
356 self.filestart = self.bundle.tell()
356 return m
357 return m
357
358
358 @localrepo.unfilteredpropertycache
359 @localrepo.unfilteredpropertycache
359 def manstart(self):
360 def manstart(self):
360 self.changelog
361 self.changelog
361 return self.manstart
362 return self.manstart
362
363
363 @localrepo.unfilteredpropertycache
364 @localrepo.unfilteredpropertycache
364 def filestart(self):
365 def filestart(self):
365 self.manifestlog
366 self.manifestlog
366 return self.filestart
367 return self.filestart
367
368
368 def url(self):
369 def url(self):
369 return self._url
370 return self._url
370
371
371 def file(self, f):
372 def file(self, f):
372 if not self.bundlefilespos:
373 if not self.bundlefilespos:
373 self.bundle.seek(self.filestart)
374 self.bundle.seek(self.filestart)
374 self.bundlefilespos = _getfilestarts(self.bundle)
375 self.bundlefilespos = _getfilestarts(self.bundle)
375
376
376 if f in self.bundlefilespos:
377 if f in self.bundlefilespos:
377 self.bundle.seek(self.bundlefilespos[f])
378 self.bundle.seek(self.bundlefilespos[f])
378 linkmapper = self.unfiltered().changelog.rev
379 linkmapper = self.unfiltered().changelog.rev
379 return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
380 return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
380 else:
381 else:
381 return filelog.filelog(self.svfs, f)
382 return filelog.filelog(self.svfs, f)
382
383
383 def close(self):
384 def close(self):
384 """Close assigned bundle file immediately."""
385 """Close assigned bundle file immediately."""
385 self.bundlefile.close()
386 self.bundlefile.close()
386 if self.tempfile is not None:
387 if self.tempfile is not None:
387 self.vfs.unlink(self.tempfile)
388 self.vfs.unlink(self.tempfile)
388 if self._tempparent:
389 if self._tempparent:
389 shutil.rmtree(self._tempparent, True)
390 shutil.rmtree(self._tempparent, True)
390
391
391 def cancopy(self):
392 def cancopy(self):
392 return False
393 return False
393
394
394 def peer(self):
395 def peer(self):
395 return bundlepeer(self)
396 return bundlepeer(self)
396
397
397 def getcwd(self):
398 def getcwd(self):
398 return pycompat.getcwd() # always outside the repo
399 return pycompat.getcwd() # always outside the repo
399
400
400 # Check if parents exist in localrepo before setting
401 # Check if parents exist in localrepo before setting
401 def setparents(self, p1, p2=nullid):
402 def setparents(self, p1, p2=nullid):
402 p1rev = self.changelog.rev(p1)
403 p1rev = self.changelog.rev(p1)
403 p2rev = self.changelog.rev(p2)
404 p2rev = self.changelog.rev(p2)
404 msg = _("setting parent to node %s that only exists in the bundle\n")
405 msg = _("setting parent to node %s that only exists in the bundle\n")
405 if self.changelog.repotiprev < p1rev:
406 if self.changelog.repotiprev < p1rev:
406 self.ui.warn(msg % nodemod.hex(p1))
407 self.ui.warn(msg % nodemod.hex(p1))
407 if self.changelog.repotiprev < p2rev:
408 if self.changelog.repotiprev < p2rev:
408 self.ui.warn(msg % nodemod.hex(p2))
409 self.ui.warn(msg % nodemod.hex(p2))
409 return super(bundlerepository, self).setparents(p1, p2)
410 return super(bundlerepository, self).setparents(p1, p2)
410
411
411 def instance(ui, path, create):
412 def instance(ui, path, create):
412 if create:
413 if create:
413 raise error.Abort(_('cannot create new bundle repository'))
414 raise error.Abort(_('cannot create new bundle repository'))
414 # internal config: bundle.mainreporoot
415 # internal config: bundle.mainreporoot
415 parentpath = ui.config("bundle", "mainreporoot", "")
416 parentpath = ui.config("bundle", "mainreporoot", "")
416 if not parentpath:
417 if not parentpath:
417 # try to find the correct path to the working directory repo
418 # try to find the correct path to the working directory repo
418 parentpath = cmdutil.findrepo(pycompat.getcwd())
419 parentpath = cmdutil.findrepo(pycompat.getcwd())
419 if parentpath is None:
420 if parentpath is None:
420 parentpath = ''
421 parentpath = ''
421 if parentpath:
422 if parentpath:
422 # Try to make the full path relative so we get a nice, short URL.
423 # Try to make the full path relative so we get a nice, short URL.
423 # In particular, we don't want temp dir names in test outputs.
424 # In particular, we don't want temp dir names in test outputs.
424 cwd = pycompat.getcwd()
425 cwd = pycompat.getcwd()
425 if parentpath == cwd:
426 if parentpath == cwd:
426 parentpath = ''
427 parentpath = ''
427 else:
428 else:
428 cwd = pathutil.normasprefix(cwd)
429 cwd = pathutil.normasprefix(cwd)
429 if parentpath.startswith(cwd):
430 if parentpath.startswith(cwd):
430 parentpath = parentpath[len(cwd):]
431 parentpath = parentpath[len(cwd):]
431 u = util.url(path)
432 u = util.url(path)
432 path = u.localpath()
433 path = u.localpath()
433 if u.scheme == 'bundle':
434 if u.scheme == 'bundle':
434 s = path.split("+", 1)
435 s = path.split("+", 1)
435 if len(s) == 1:
436 if len(s) == 1:
436 repopath, bundlename = parentpath, s[0]
437 repopath, bundlename = parentpath, s[0]
437 else:
438 else:
438 repopath, bundlename = s
439 repopath, bundlename = s
439 else:
440 else:
440 repopath, bundlename = parentpath, path
441 repopath, bundlename = parentpath, path
441 return bundlerepository(ui, repopath, bundlename)
442 return bundlerepository(ui, repopath, bundlename)
442
443
443 class bundletransactionmanager(object):
444 class bundletransactionmanager(object):
444 def transaction(self):
445 def transaction(self):
445 return None
446 return None
446
447
447 def close(self):
448 def close(self):
448 raise NotImplementedError
449 raise NotImplementedError
449
450
450 def release(self):
451 def release(self):
451 raise NotImplementedError
452 raise NotImplementedError
452
453
453 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
454 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
454 force=False):
455 force=False):
455 '''obtains a bundle of changes incoming from other
456 '''obtains a bundle of changes incoming from other
456
457
457 "onlyheads" restricts the returned changes to those reachable from the
458 "onlyheads" restricts the returned changes to those reachable from the
458 specified heads.
459 specified heads.
459 "bundlename", if given, stores the bundle to this file path permanently;
460 "bundlename", if given, stores the bundle to this file path permanently;
460 otherwise it's stored to a temp file and gets deleted again when you call
461 otherwise it's stored to a temp file and gets deleted again when you call
461 the returned "cleanupfn".
462 the returned "cleanupfn".
462 "force" indicates whether to proceed on unrelated repos.
463 "force" indicates whether to proceed on unrelated repos.
463
464
464 Returns a tuple (local, csets, cleanupfn):
465 Returns a tuple (local, csets, cleanupfn):
465
466
466 "local" is a local repo from which to obtain the actual incoming
467 "local" is a local repo from which to obtain the actual incoming
467 changesets; it is a bundlerepo for the obtained bundle when the
468 changesets; it is a bundlerepo for the obtained bundle when the
468 original "other" is remote.
469 original "other" is remote.
469 "csets" lists the incoming changeset node ids.
470 "csets" lists the incoming changeset node ids.
470 "cleanupfn" must be called without arguments when you're done processing
471 "cleanupfn" must be called without arguments when you're done processing
471 the changes; it closes both the original "other" and the one returned
472 the changes; it closes both the original "other" and the one returned
472 here.
473 here.
473 '''
474 '''
474 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
475 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
475 force=force)
476 force=force)
476 common, incoming, rheads = tmp
477 common, incoming, rheads = tmp
477 if not incoming:
478 if not incoming:
478 try:
479 try:
479 if bundlename:
480 if bundlename:
480 os.unlink(bundlename)
481 os.unlink(bundlename)
481 except OSError:
482 except OSError:
482 pass
483 pass
483 return repo, [], other.close
484 return repo, [], other.close
484
485
485 commonset = set(common)
486 commonset = set(common)
486 rheads = [x for x in rheads if x not in commonset]
487 rheads = [x for x in rheads if x not in commonset]
487
488
488 bundle = None
489 bundle = None
489 bundlerepo = None
490 bundlerepo = None
490 localrepo = other.local()
491 localrepo = other.local()
491 if bundlename or not localrepo:
492 if bundlename or not localrepo:
492 # create a bundle (uncompressed if other repo is not local)
493 # create a bundle (uncompressed if other repo is not local)
493
494
494 # developer config: devel.legacy.exchange
495 # developer config: devel.legacy.exchange
495 legexc = ui.configlist('devel', 'legacy.exchange')
496 legexc = ui.configlist('devel', 'legacy.exchange')
496 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
497 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
497 canbundle2 = (not forcebundle1
498 canbundle2 = (not forcebundle1
498 and other.capable('getbundle')
499 and other.capable('getbundle')
499 and other.capable('bundle2'))
500 and other.capable('bundle2'))
500 if canbundle2:
501 if canbundle2:
501 kwargs = {}
502 kwargs = {}
502 kwargs['common'] = common
503 kwargs['common'] = common
503 kwargs['heads'] = rheads
504 kwargs['heads'] = rheads
504 kwargs['bundlecaps'] = exchange.caps20to10(repo)
505 kwargs['bundlecaps'] = exchange.caps20to10(repo)
505 kwargs['cg'] = True
506 kwargs['cg'] = True
506 b2 = other.getbundle('incoming', **kwargs)
507 b2 = other.getbundle('incoming', **kwargs)
507 fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
508 fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
508 bundlename)
509 bundlename)
509 else:
510 else:
510 if other.capable('getbundle'):
511 if other.capable('getbundle'):
511 cg = other.getbundle('incoming', common=common, heads=rheads)
512 cg = other.getbundle('incoming', common=common, heads=rheads)
512 elif onlyheads is None and not other.capable('changegroupsubset'):
513 elif onlyheads is None and not other.capable('changegroupsubset'):
513 # compat with older servers when pulling all remote heads
514 # compat with older servers when pulling all remote heads
514 cg = other.changegroup(incoming, "incoming")
515 cg = other.changegroup(incoming, "incoming")
515 rheads = None
516 rheads = None
516 else:
517 else:
517 cg = other.changegroupsubset(incoming, rheads, 'incoming')
518 cg = other.changegroupsubset(incoming, rheads, 'incoming')
518 if localrepo:
519 if localrepo:
519 bundletype = "HG10BZ"
520 bundletype = "HG10BZ"
520 else:
521 else:
521 bundletype = "HG10UN"
522 bundletype = "HG10UN"
522 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
523 fname = bundle = bundle2.writebundle(ui, cg, bundlename,
523 bundletype)
524 bundletype)
524 # keep written bundle?
525 # keep written bundle?
525 if bundlename:
526 if bundlename:
526 bundle = None
527 bundle = None
527 if not localrepo:
528 if not localrepo:
528 # use the created uncompressed bundlerepo
529 # use the created uncompressed bundlerepo
529 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
530 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
530 fname)
531 fname)
531 # this repo contains local and other now, so filter out local again
532 # this repo contains local and other now, so filter out local again
532 common = repo.heads()
533 common = repo.heads()
533 if localrepo:
534 if localrepo:
534 # Part of common may be remotely filtered
535 # Part of common may be remotely filtered
535 # So use an unfiltered version
536 # So use an unfiltered version
536 # The discovery process probably need cleanup to avoid that
537 # The discovery process probably need cleanup to avoid that
537 localrepo = localrepo.unfiltered()
538 localrepo = localrepo.unfiltered()
538
539
539 csets = localrepo.changelog.findmissing(common, rheads)
540 csets = localrepo.changelog.findmissing(common, rheads)
540
541
541 if bundlerepo:
542 if bundlerepo:
542 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
543 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]
543 remotephases = other.listkeys('phases')
544 remotephases = other.listkeys('phases')
544
545
545 pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
546 pullop = exchange.pulloperation(bundlerepo, other, heads=reponodes)
546 pullop.trmanager = bundletransactionmanager()
547 pullop.trmanager = bundletransactionmanager()
547 exchange._pullapplyphases(pullop, remotephases)
548 exchange._pullapplyphases(pullop, remotephases)
548
549
549 def cleanup():
550 def cleanup():
550 if bundlerepo:
551 if bundlerepo:
551 bundlerepo.close()
552 bundlerepo.close()
552 if bundle:
553 if bundle:
553 os.unlink(bundle)
554 os.unlink(bundle)
554 other.close()
555 other.close()
555
556
556 return (localrepo, csets, cleanup)
557 return (localrepo, csets, cleanup)
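The second hunk adjusts the flag-processor test: with flags now preserved in the bundle repository's index, the expected failure for "hg -R bundle.hg log --stat ... base64" changes from the generic "abort: integrity check failed on data/base64.i:2!" to the same mpatch "invalid patch" traceback already expected for the follow-up bundle command. The test pushes file revisions through read/write transforms keyed on revlog flag bits; the sketch below illustrates that idea in plain Python. It is not the test's flagprocessorext.py (whose registration hook and flag values are only assumed here), just a toy registry mapping one flag bit to a base64 encode/decode pair.

    import base64

    # Hypothetical registry in the spirit of the flag-processor extension:
    # one (read, write) transform pair per revlog flag bit.  The flag value
    # below is illustrative, not Mercurial's actual constant.
    REVIDX_BASE64 = 1 << 14
    PROCESSORS = {
        REVIDX_BASE64: (base64.b64decode,   # applied on read  (raw  -> text)
                        base64.b64encode),  # applied on write (text -> raw)
    }

    def processflags(text, flags, operation):
        """Run every registered transform whose bit is set in ``flags``."""
        for bit, (readfn, writefn) in PROCESSORS.items():
            if flags & bit:
                text = readfn(text) if operation == 'read' else writefn(text)
        return text

    stored = processflags(b'[BASE64]payload\n', REVIDX_BASE64, 'write')
    assert processflags(stored, REVIDX_BASE64, 'read') == b'[BASE64]payload\n'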
@@ -1,197 +1,198 @@
1 # Create server
1 # Create server
2 $ hg init server
2 $ hg init server
3 $ cd server
3 $ cd server
4 $ cat >> .hg/hgrc << EOF
4 $ cat >> .hg/hgrc << EOF
5 > [extensions]
5 > [extensions]
6 > extension=$TESTDIR/flagprocessorext.py
6 > extension=$TESTDIR/flagprocessorext.py
7 > EOF
7 > EOF
8 $ cd ../
8 $ cd ../
9
9
10 # Clone server and enable extensions
10 # Clone server and enable extensions
11 $ hg clone -q server client
11 $ hg clone -q server client
12 $ cd client
12 $ cd client
13 $ cat >> .hg/hgrc << EOF
13 $ cat >> .hg/hgrc << EOF
14 > [extensions]
14 > [extensions]
15 > extension=$TESTDIR/flagprocessorext.py
15 > extension=$TESTDIR/flagprocessorext.py
16 > EOF
16 > EOF
17
17
18 # Commit file that will trigger the noop extension
18 # Commit file that will trigger the noop extension
19 $ echo '[NOOP]' > noop
19 $ echo '[NOOP]' > noop
20 $ hg commit -Aqm "noop"
20 $ hg commit -Aqm "noop"
21
21
22 # Commit file that will trigger the base64 extension
22 # Commit file that will trigger the base64 extension
23 $ echo '[BASE64]' > base64
23 $ echo '[BASE64]' > base64
24 $ hg commit -Aqm 'base64'
24 $ hg commit -Aqm 'base64'
25
25
26 # Commit file that will trigger the gzip extension
26 # Commit file that will trigger the gzip extension
27 $ echo '[GZIP]' > gzip
27 $ echo '[GZIP]' > gzip
28 $ hg commit -Aqm 'gzip'
28 $ hg commit -Aqm 'gzip'
29
29
30 # Commit file that will trigger noop and base64
30 # Commit file that will trigger noop and base64
31 $ echo '[NOOP][BASE64]' > noop-base64
31 $ echo '[NOOP][BASE64]' > noop-base64
32 $ hg commit -Aqm 'noop+base64'
32 $ hg commit -Aqm 'noop+base64'
33
33
34 # Commit file that will trigger noop and gzip
34 # Commit file that will trigger noop and gzip
35 $ echo '[NOOP][GZIP]' > noop-gzip
35 $ echo '[NOOP][GZIP]' > noop-gzip
36 $ hg commit -Aqm 'noop+gzip'
36 $ hg commit -Aqm 'noop+gzip'
37
37
38 # Commit file that will trigger base64 and gzip
38 # Commit file that will trigger base64 and gzip
39 $ echo '[BASE64][GZIP]' > base64-gzip
39 $ echo '[BASE64][GZIP]' > base64-gzip
40 $ hg commit -Aqm 'base64+gzip'
40 $ hg commit -Aqm 'base64+gzip'
41
41
42 # Commit file that will trigger base64, gzip and noop
42 # Commit file that will trigger base64, gzip and noop
43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
44 $ hg commit -Aqm 'base64+gzip+noop'
44 $ hg commit -Aqm 'base64+gzip+noop'
45
45
46 # TEST: ensure the revision data is consistent
46 # TEST: ensure the revision data is consistent
47 $ hg cat noop
47 $ hg cat noop
48 [NOOP]
48 [NOOP]
49 $ hg debugdata noop 0
49 $ hg debugdata noop 0
50 [NOOP]
50 [NOOP]
51
51
52 $ hg cat -r . base64
52 $ hg cat -r . base64
53 [BASE64]
53 [BASE64]
54 $ hg debugdata base64 0
54 $ hg debugdata base64 0
55 W0JBU0U2NF0K (no-eol)
55 W0JBU0U2NF0K (no-eol)
56
56
57 $ hg cat -r . gzip
57 $ hg cat -r . gzip
58 [GZIP]
58 [GZIP]
59 $ hg debugdata gzip 0
59 $ hg debugdata gzip 0
60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
61
61
62 $ hg cat -r . noop-base64
62 $ hg cat -r . noop-base64
63 [NOOP][BASE64]
63 [NOOP][BASE64]
64 $ hg debugdata noop-base64 0
64 $ hg debugdata noop-base64 0
65 W05PT1BdW0JBU0U2NF0K (no-eol)
65 W05PT1BdW0JBU0U2NF0K (no-eol)
66
66
67 $ hg cat -r . noop-gzip
67 $ hg cat -r . noop-gzip
68 [NOOP][GZIP]
68 [NOOP][GZIP]
69 $ hg debugdata noop-gzip 0
69 $ hg debugdata noop-gzip 0
70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
71
71
72 $ hg cat -r . base64-gzip
72 $ hg cat -r . base64-gzip
73 [BASE64][GZIP]
73 [BASE64][GZIP]
74 $ hg debugdata base64-gzip 0
74 $ hg debugdata base64-gzip 0
75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
76
76
77 $ hg cat -r . base64-gzip-noop
77 $ hg cat -r . base64-gzip-noop
78 [BASE64][GZIP][NOOP]
78 [BASE64][GZIP][NOOP]
79 $ hg debugdata base64-gzip-noop 0
79 $ hg debugdata base64-gzip-noop 0
80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
81
81
82 # Push to the server
82 # Push to the server
83 $ hg push
83 $ hg push
84 pushing to $TESTTMP/server (glob)
84 pushing to $TESTTMP/server (glob)
85 searching for changes
85 searching for changes
86 adding changesets
86 adding changesets
87 adding manifests
87 adding manifests
88 adding file changes
88 adding file changes
89 added 7 changesets with 7 changes to 7 files
89 added 7 changesets with 7 changes to 7 files
90
90
91 # Initialize new client (not cloning) and setup extension
91 # Initialize new client (not cloning) and setup extension
92 $ cd ..
92 $ cd ..
93 $ hg init client2
93 $ hg init client2
94 $ cd client2
94 $ cd client2
95 $ cat >> .hg/hgrc << EOF
95 $ cat >> .hg/hgrc << EOF
96 > [paths]
96 > [paths]
97 > default = $TESTTMP/server
97 > default = $TESTTMP/server
98 > [extensions]
98 > [extensions]
99 > extension=$TESTDIR/flagprocessorext.py
99 > extension=$TESTDIR/flagprocessorext.py
100 > EOF
100 > EOF
101
101
102 # Pull from server and update to latest revision
102 # Pull from server and update to latest revision
103 $ hg pull default
103 $ hg pull default
104 pulling from $TESTTMP/server (glob)
104 pulling from $TESTTMP/server (glob)
105 requesting all changes
105 requesting all changes
106 adding changesets
106 adding changesets
107 adding manifests
107 adding manifests
108 adding file changes
108 adding file changes
109 added 7 changesets with 7 changes to 7 files
109 added 7 changesets with 7 changes to 7 files
110 (run 'hg update' to get a working copy)
110 (run 'hg update' to get a working copy)
111 $ hg update
111 $ hg update
112 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
113
113
114 # TEST: ensure the revision data is consistent
114 # TEST: ensure the revision data is consistent
115 $ hg cat noop
115 $ hg cat noop
116 [NOOP]
116 [NOOP]
117 $ hg debugdata noop 0
117 $ hg debugdata noop 0
118 [NOOP]
118 [NOOP]
119
119
120 $ hg cat -r . base64
120 $ hg cat -r . base64
121 [BASE64]
121 [BASE64]
122 $ hg debugdata base64 0
122 $ hg debugdata base64 0
123 W0JBU0U2NF0K (no-eol)
123 W0JBU0U2NF0K (no-eol)
124
124
125 $ hg cat -r . gzip
125 $ hg cat -r . gzip
126 [GZIP]
126 [GZIP]
127 $ hg debugdata gzip 0
127 $ hg debugdata gzip 0
128 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
128 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
129
129
130 $ hg cat -r . noop-base64
130 $ hg cat -r . noop-base64
131 [NOOP][BASE64]
131 [NOOP][BASE64]
132 $ hg debugdata noop-base64 0
132 $ hg debugdata noop-base64 0
133 W05PT1BdW0JBU0U2NF0K (no-eol)
133 W05PT1BdW0JBU0U2NF0K (no-eol)
134
134
135 $ hg cat -r . noop-gzip
135 $ hg cat -r . noop-gzip
136 [NOOP][GZIP]
136 [NOOP][GZIP]
137 $ hg debugdata noop-gzip 0
137 $ hg debugdata noop-gzip 0
138 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
138 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
139
139
140 $ hg cat -r . base64-gzip
140 $ hg cat -r . base64-gzip
141 [BASE64][GZIP]
141 [BASE64][GZIP]
142 $ hg debugdata base64-gzip 0
142 $ hg debugdata base64-gzip 0
143 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
143 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
144
144
145 $ hg cat -r . base64-gzip-noop
145 $ hg cat -r . base64-gzip-noop
146 [BASE64][GZIP][NOOP]
146 [BASE64][GZIP][NOOP]
147 $ hg debugdata base64-gzip-noop 0
147 $ hg debugdata base64-gzip-noop 0
148 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
148 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
149
149
150 # TEST: ensure a missing processor is handled
150 # TEST: ensure a missing processor is handled
151 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
151 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
152 $ hg commit -Aqm 'fail+base64+gzip+noop'
152 $ hg commit -Aqm 'fail+base64+gzip+noop'
153 abort: missing processor for flag '0x1'!
153 abort: missing processor for flag '0x1'!
154 [255]
154 [255]
155
155
156 # TEST: ensure we cannot register several flag processors on the same flag
156 # TEST: ensure we cannot register several flag processors on the same flag
157 $ cat >> .hg/hgrc << EOF
157 $ cat >> .hg/hgrc << EOF
158 > [extensions]
158 > [extensions]
159 > extension=$TESTDIR/flagprocessorext.py
159 > extension=$TESTDIR/flagprocessorext.py
160 > duplicate=$TESTDIR/flagprocessorext.py
160 > duplicate=$TESTDIR/flagprocessorext.py
161 > EOF
161 > EOF
162 $ echo 'this should fail' > file
162 $ echo 'this should fail' > file
163 $ hg commit -Aqm 'add file'
163 $ hg commit -Aqm 'add file'
164 abort: cannot register multiple processors on flag '0x8'.
164 abort: cannot register multiple processors on flag '0x8'.
165 [255]
165 [255]
166
166
167 $ cd ..
167 $ cd ..
168
168
169 # TEST: bundle repo
169 # TEST: bundle repo
170 $ hg init bundletest
170 $ hg init bundletest
171 $ cd bundletest
171 $ cd bundletest
172
172
173 $ cat >> .hg/hgrc << EOF
173 $ cat >> .hg/hgrc << EOF
174 > [extensions]
174 > [extensions]
175 > flagprocessor=$TESTDIR/flagprocessorext.py
175 > flagprocessor=$TESTDIR/flagprocessorext.py
176 > EOF
176 > EOF
177
177
178 $ for i in 0 single two three 4; do
178 $ for i in 0 single two three 4; do
179 > echo '[BASE64]a-bit-longer-'$i > base64
179 > echo '[BASE64]a-bit-longer-'$i > base64
180 > hg commit -m base64-$i -A base64
180 > hg commit -m base64-$i -A base64
181 > done
181 > done
182
182
183 $ hg update 2 -q
183 $ hg update 2 -q
184 $ echo '[BASE64]a-bit-longer-branching' > base64
184 $ echo '[BASE64]a-bit-longer-branching' > base64
185 $ hg commit -q -m branching
185 $ hg commit -q -m branching
186
186
187 $ hg bundle --base 1 bundle.hg
187 $ hg bundle --base 1 bundle.hg
188 4 changesets found
188 4 changesets found
189 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
189 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
190 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
190 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
191 abort: integrity check failed on data/base64.i:2!
191 Traceback (most recent call last):
192 mercurial.mpatch.mpatchError: invalid patch
192
193
193 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q 2>&1 | egrep -v '^(\*\*| )'
194 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q 2>&1 | egrep -v '^(\*\*| )'
194 Traceback (most recent call last):
195 Traceback (most recent call last):
195 mercurial.mpatch.mpatchError: invalid patch
196 mercurial.mpatch.mpatchError: invalid patch
196 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
197 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64 2>&1 | egrep -v '^(\*\*| )'
197 abort: repository bundle-again.hg not found!
198 abort: repository bundle-again.hg not found!