##// END OF EJS Templates
strip: compress bundle2 backup using BZ...
Pierre-Yves David -
r26425:eb21b667 default
parent child Browse files
Show More
@@ -1,296 +1,300 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import short
14 from .node import short
15 from . import (
15 from . import (
16 bundle2,
16 bundle2,
17 changegroup,
17 changegroup,
18 exchange,
18 exchange,
19 util,
19 util,
20 )
20 )
21
21
22 def _bundle(repo, bases, heads, node, suffix, compress=True):
22 def _bundle(repo, bases, heads, node, suffix, compress=True):
23 """create a bundle with the specified revisions as a backup"""
23 """create a bundle with the specified revisions as a backup"""
24 cgversion = '01'
24 cgversion = '01'
25 if 'generaldelta' in repo.requirements:
25 if 'generaldelta' in repo.requirements:
26 cgversion = '02'
26 cgversion = '02'
27
27
28 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
28 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
29 version=cgversion)
29 version=cgversion)
30 backupdir = "strip-backup"
30 backupdir = "strip-backup"
31 vfs = repo.vfs
31 vfs = repo.vfs
32 if not vfs.isdir(backupdir):
32 if not vfs.isdir(backupdir):
33 vfs.mkdir(backupdir)
33 vfs.mkdir(backupdir)
34
34
35 # Include a hash of all the nodes in the filename for uniqueness
35 # Include a hash of all the nodes in the filename for uniqueness
36 allcommits = repo.set('%ln::%ln', bases, heads)
36 allcommits = repo.set('%ln::%ln', bases, heads)
37 allhashes = sorted(c.hex() for c in allcommits)
37 allhashes = sorted(c.hex() for c in allcommits)
38 totalhash = util.sha1(''.join(allhashes)).hexdigest()
38 totalhash = util.sha1(''.join(allhashes)).hexdigest()
39 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
39 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
40
40
41 comp = None
41 if cgversion != '01':
42 if cgversion != '01':
42 bundletype = "HG20"
43 bundletype = "HG20"
44 if compress:
45 comp = 'BZ'
43 elif compress:
46 elif compress:
44 bundletype = "HG10BZ"
47 bundletype = "HG10BZ"
45 else:
48 else:
46 bundletype = "HG10UN"
49 bundletype = "HG10UN"
47 return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs)
50 return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs,
51 compression=comp)
48
52
49 def _collectfiles(repo, striprev):
53 def _collectfiles(repo, striprev):
50 """find out the filelogs affected by the strip"""
54 """find out the filelogs affected by the strip"""
51 files = set()
55 files = set()
52
56
53 for x in xrange(striprev, len(repo)):
57 for x in xrange(striprev, len(repo)):
54 files.update(repo[x].files())
58 files.update(repo[x].files())
55
59
56 return sorted(files)
60 return sorted(files)
57
61
58 def _collectbrokencsets(repo, files, striprev):
62 def _collectbrokencsets(repo, files, striprev):
59 """return the changesets which will be broken by the truncation"""
63 """return the changesets which will be broken by the truncation"""
60 s = set()
64 s = set()
61 def collectone(revlog):
65 def collectone(revlog):
62 _, brokenset = revlog.getstrippoint(striprev)
66 _, brokenset = revlog.getstrippoint(striprev)
63 s.update([revlog.linkrev(r) for r in brokenset])
67 s.update([revlog.linkrev(r) for r in brokenset])
64
68
65 collectone(repo.manifest)
69 collectone(repo.manifest)
66 for fname in files:
70 for fname in files:
67 collectone(repo.file(fname))
71 collectone(repo.file(fname))
68
72
69 return s
73 return s
70
74
71 def strip(ui, repo, nodelist, backup=True, topic='backup'):
75 def strip(ui, repo, nodelist, backup=True, topic='backup'):
72
76
73 # Simple way to maintain backwards compatibility for this
77 # Simple way to maintain backwards compatibility for this
74 # argument.
78 # argument.
75 if backup in ['none', 'strip']:
79 if backup in ['none', 'strip']:
76 backup = False
80 backup = False
77
81
78 repo = repo.unfiltered()
82 repo = repo.unfiltered()
79 repo.destroying()
83 repo.destroying()
80
84
81 cl = repo.changelog
85 cl = repo.changelog
82 # TODO handle undo of merge sets
86 # TODO handle undo of merge sets
83 if isinstance(nodelist, str):
87 if isinstance(nodelist, str):
84 nodelist = [nodelist]
88 nodelist = [nodelist]
85 striplist = [cl.rev(node) for node in nodelist]
89 striplist = [cl.rev(node) for node in nodelist]
86 striprev = min(striplist)
90 striprev = min(striplist)
87
91
88 # Some revisions with rev > striprev may not be descendants of striprev.
92 # Some revisions with rev > striprev may not be descendants of striprev.
89 # We have to find these revisions and put them in a bundle, so that
93 # We have to find these revisions and put them in a bundle, so that
90 # we can restore them after the truncations.
94 # we can restore them after the truncations.
91 # To create the bundle we use repo.changegroupsubset which requires
95 # To create the bundle we use repo.changegroupsubset which requires
92 # the list of heads and bases of the set of interesting revisions.
96 # the list of heads and bases of the set of interesting revisions.
93 # (head = revision in the set that has no descendant in the set;
97 # (head = revision in the set that has no descendant in the set;
94 # base = revision in the set that has no ancestor in the set)
98 # base = revision in the set that has no ancestor in the set)
95 tostrip = set(striplist)
99 tostrip = set(striplist)
96 for rev in striplist:
100 for rev in striplist:
97 for desc in cl.descendants([rev]):
101 for desc in cl.descendants([rev]):
98 tostrip.add(desc)
102 tostrip.add(desc)
99
103
100 files = _collectfiles(repo, striprev)
104 files = _collectfiles(repo, striprev)
101 saverevs = _collectbrokencsets(repo, files, striprev)
105 saverevs = _collectbrokencsets(repo, files, striprev)
102
106
103 # compute heads
107 # compute heads
104 saveheads = set(saverevs)
108 saveheads = set(saverevs)
105 for r in xrange(striprev + 1, len(cl)):
109 for r in xrange(striprev + 1, len(cl)):
106 if r not in tostrip:
110 if r not in tostrip:
107 saverevs.add(r)
111 saverevs.add(r)
108 saveheads.difference_update(cl.parentrevs(r))
112 saveheads.difference_update(cl.parentrevs(r))
109 saveheads.add(r)
113 saveheads.add(r)
110 saveheads = [cl.node(r) for r in saveheads]
114 saveheads = [cl.node(r) for r in saveheads]
111
115
112 # compute base nodes
116 # compute base nodes
113 if saverevs:
117 if saverevs:
114 descendants = set(cl.descendants(saverevs))
118 descendants = set(cl.descendants(saverevs))
115 saverevs.difference_update(descendants)
119 saverevs.difference_update(descendants)
116 savebases = [cl.node(r) for r in saverevs]
120 savebases = [cl.node(r) for r in saverevs]
117 stripbases = [cl.node(r) for r in tostrip]
121 stripbases = [cl.node(r) for r in tostrip]
118
122
119 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
123 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
120 # is much faster
124 # is much faster
121 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
125 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
122 if newbmtarget:
126 if newbmtarget:
123 newbmtarget = repo[newbmtarget.first()].node()
127 newbmtarget = repo[newbmtarget.first()].node()
124 else:
128 else:
125 newbmtarget = '.'
129 newbmtarget = '.'
126
130
127 bm = repo._bookmarks
131 bm = repo._bookmarks
128 updatebm = []
132 updatebm = []
129 for m in bm:
133 for m in bm:
130 rev = repo[bm[m]].rev()
134 rev = repo[bm[m]].rev()
131 if rev in tostrip:
135 if rev in tostrip:
132 updatebm.append(m)
136 updatebm.append(m)
133
137
134 # create a changegroup for all the branches we need to keep
138 # create a changegroup for all the branches we need to keep
135 backupfile = None
139 backupfile = None
136 vfs = repo.vfs
140 vfs = repo.vfs
137 node = nodelist[-1]
141 node = nodelist[-1]
138 if backup:
142 if backup:
139 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
143 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
140 repo.ui.status(_("saved backup bundle to %s\n") %
144 repo.ui.status(_("saved backup bundle to %s\n") %
141 vfs.join(backupfile))
145 vfs.join(backupfile))
142 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
146 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
143 vfs.join(backupfile))
147 vfs.join(backupfile))
144 if saveheads or savebases:
148 if saveheads or savebases:
145 # do not compress partial bundle if we remove it from disk later
149 # do not compress partial bundle if we remove it from disk later
146 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
150 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
147 compress=False)
151 compress=False)
148
152
149 mfst = repo.manifest
153 mfst = repo.manifest
150
154
151 curtr = repo.currenttransaction()
155 curtr = repo.currenttransaction()
152 if curtr is not None:
156 if curtr is not None:
153 del curtr # avoid carrying reference to transaction for nothing
157 del curtr # avoid carrying reference to transaction for nothing
154 msg = _('programming error: cannot strip from inside a transaction')
158 msg = _('programming error: cannot strip from inside a transaction')
155 raise util.Abort(msg, hint=_('contact your extension maintainer'))
159 raise util.Abort(msg, hint=_('contact your extension maintainer'))
156
160
157 tr = repo.transaction("strip")
161 tr = repo.transaction("strip")
158 offset = len(tr.entries)
162 offset = len(tr.entries)
159
163
160 try:
164 try:
161 tr.startgroup()
165 tr.startgroup()
162 cl.strip(striprev, tr)
166 cl.strip(striprev, tr)
163 mfst.strip(striprev, tr)
167 mfst.strip(striprev, tr)
164 for fn in files:
168 for fn in files:
165 repo.file(fn).strip(striprev, tr)
169 repo.file(fn).strip(striprev, tr)
166 tr.endgroup()
170 tr.endgroup()
167
171
168 try:
172 try:
169 for i in xrange(offset, len(tr.entries)):
173 for i in xrange(offset, len(tr.entries)):
170 file, troffset, ignore = tr.entries[i]
174 file, troffset, ignore = tr.entries[i]
171 repo.svfs(file, 'a').truncate(troffset)
175 repo.svfs(file, 'a').truncate(troffset)
172 if troffset == 0:
176 if troffset == 0:
173 repo.store.markremoved(file)
177 repo.store.markremoved(file)
174 tr.close()
178 tr.close()
175 finally:
179 finally:
176 tr.release()
180 tr.release()
177
181
178 if saveheads or savebases:
182 if saveheads or savebases:
179 ui.note(_("adding branch\n"))
183 ui.note(_("adding branch\n"))
180 f = vfs.open(chgrpfile, "rb")
184 f = vfs.open(chgrpfile, "rb")
181 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
185 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
182 if not repo.ui.verbose:
186 if not repo.ui.verbose:
183 # silence internal shuffling chatter
187 # silence internal shuffling chatter
184 repo.ui.pushbuffer()
188 repo.ui.pushbuffer()
185 if isinstance(gen, bundle2.unbundle20):
189 if isinstance(gen, bundle2.unbundle20):
186 tr = repo.transaction('strip')
190 tr = repo.transaction('strip')
187 tr.hookargs = {'source': 'strip',
191 tr.hookargs = {'source': 'strip',
188 'url': 'bundle:' + vfs.join(chgrpfile)}
192 'url': 'bundle:' + vfs.join(chgrpfile)}
189 try:
193 try:
190 bundle2.processbundle(repo, gen, lambda: tr)
194 bundle2.processbundle(repo, gen, lambda: tr)
191 tr.close()
195 tr.close()
192 finally:
196 finally:
193 tr.release()
197 tr.release()
194 else:
198 else:
195 changegroup.addchangegroup(repo, gen, 'strip',
199 changegroup.addchangegroup(repo, gen, 'strip',
196 'bundle:' + vfs.join(chgrpfile),
200 'bundle:' + vfs.join(chgrpfile),
197 True)
201 True)
198 if not repo.ui.verbose:
202 if not repo.ui.verbose:
199 repo.ui.popbuffer()
203 repo.ui.popbuffer()
200 f.close()
204 f.close()
201
205
202 # remove undo files
206 # remove undo files
203 for undovfs, undofile in repo.undofiles():
207 for undovfs, undofile in repo.undofiles():
204 try:
208 try:
205 undovfs.unlink(undofile)
209 undovfs.unlink(undofile)
206 except OSError as e:
210 except OSError as e:
207 if e.errno != errno.ENOENT:
211 if e.errno != errno.ENOENT:
208 ui.warn(_('error removing %s: %s\n') %
212 ui.warn(_('error removing %s: %s\n') %
209 (undovfs.join(undofile), str(e)))
213 (undovfs.join(undofile), str(e)))
210
214
211 for m in updatebm:
215 for m in updatebm:
212 bm[m] = repo[newbmtarget].node()
216 bm[m] = repo[newbmtarget].node()
213 bm.write()
217 bm.write()
214 except: # re-raises
218 except: # re-raises
215 if backupfile:
219 if backupfile:
216 ui.warn(_("strip failed, full bundle stored in '%s'\n")
220 ui.warn(_("strip failed, full bundle stored in '%s'\n")
217 % vfs.join(backupfile))
221 % vfs.join(backupfile))
218 elif saveheads:
222 elif saveheads:
219 ui.warn(_("strip failed, partial bundle stored in '%s'\n")
223 ui.warn(_("strip failed, partial bundle stored in '%s'\n")
220 % vfs.join(chgrpfile))
224 % vfs.join(chgrpfile))
221 raise
225 raise
222 else:
226 else:
223 if saveheads or savebases:
227 if saveheads or savebases:
224 # Remove partial backup only if there were no exceptions
228 # Remove partial backup only if there were no exceptions
225 vfs.unlink(chgrpfile)
229 vfs.unlink(chgrpfile)
226
230
227 repo.destroyed()
231 repo.destroyed()
228
232
229 def rebuildfncache(ui, repo):
233 def rebuildfncache(ui, repo):
230 """Rebuilds the fncache file from repo history.
234 """Rebuilds the fncache file from repo history.
231
235
232 Missing entries will be added. Extra entries will be removed.
236 Missing entries will be added. Extra entries will be removed.
233 """
237 """
234 repo = repo.unfiltered()
238 repo = repo.unfiltered()
235
239
236 if 'fncache' not in repo.requirements:
240 if 'fncache' not in repo.requirements:
237 ui.warn(_('(not rebuilding fncache because repository does not '
241 ui.warn(_('(not rebuilding fncache because repository does not '
238 'support fncache)\n'))
242 'support fncache)\n'))
239 return
243 return
240
244
241 lock = repo.lock()
245 lock = repo.lock()
242 try:
246 try:
243 fnc = repo.store.fncache
247 fnc = repo.store.fncache
244 # Trigger load of fncache.
248 # Trigger load of fncache.
245 if 'irrelevant' in fnc:
249 if 'irrelevant' in fnc:
246 pass
250 pass
247
251
248 oldentries = set(fnc.entries)
252 oldentries = set(fnc.entries)
249 newentries = set()
253 newentries = set()
250 seenfiles = set()
254 seenfiles = set()
251
255
252 repolen = len(repo)
256 repolen = len(repo)
253 for rev in repo:
257 for rev in repo:
254 ui.progress(_('changeset'), rev, total=repolen)
258 ui.progress(_('changeset'), rev, total=repolen)
255
259
256 ctx = repo[rev]
260 ctx = repo[rev]
257 for f in ctx.files():
261 for f in ctx.files():
258 # This is to minimize I/O.
262 # This is to minimize I/O.
259 if f in seenfiles:
263 if f in seenfiles:
260 continue
264 continue
261 seenfiles.add(f)
265 seenfiles.add(f)
262
266
263 i = 'data/%s.i' % f
267 i = 'data/%s.i' % f
264 d = 'data/%s.d' % f
268 d = 'data/%s.d' % f
265
269
266 if repo.store._exists(i):
270 if repo.store._exists(i):
267 newentries.add(i)
271 newentries.add(i)
268 if repo.store._exists(d):
272 if repo.store._exists(d):
269 newentries.add(d)
273 newentries.add(d)
270
274
271 ui.progress(_('changeset'), None)
275 ui.progress(_('changeset'), None)
272
276
273 addcount = len(newentries - oldentries)
277 addcount = len(newentries - oldentries)
274 removecount = len(oldentries - newentries)
278 removecount = len(oldentries - newentries)
275 for p in sorted(oldentries - newentries):
279 for p in sorted(oldentries - newentries):
276 ui.write(_('removing %s\n') % p)
280 ui.write(_('removing %s\n') % p)
277 for p in sorted(newentries - oldentries):
281 for p in sorted(newentries - oldentries):
278 ui.write(_('adding %s\n') % p)
282 ui.write(_('adding %s\n') % p)
279
283
280 if addcount or removecount:
284 if addcount or removecount:
281 ui.write(_('%d items added, %d removed from fncache\n') %
285 ui.write(_('%d items added, %d removed from fncache\n') %
282 (addcount, removecount))
286 (addcount, removecount))
283 fnc.entries = newentries
287 fnc.entries = newentries
284 fnc._dirty = True
288 fnc._dirty = True
285
289
286 tr = repo.transaction('fncache')
290 tr = repo.transaction('fncache')
287 try:
291 try:
288 fnc.write(tr)
292 fnc.write(tr)
289 tr.close()
293 tr.close()
290 finally:
294 finally:
291 tr.release()
295 tr.release()
292 else:
296 else:
293 ui.write(_('fncache already up to date\n'))
297 ui.write(_('fncache already up to date\n'))
294 finally:
298 finally:
295 lock.release()
299 lock.release()
296
300
@@ -1,114 +1,114 b''
1 Check whether size of generaldelta revlog is not bigger than its
1 Check whether size of generaldelta revlog is not bigger than its
2 regular equivalent. Test would fail if generaldelta was naive
2 regular equivalent. Test would fail if generaldelta was naive
3 implementation of parentdelta: third manifest revision would be fully
3 implementation of parentdelta: third manifest revision would be fully
4 inserted due to big distance from its parent revision (zero).
4 inserted due to big distance from its parent revision (zero).
5
5
6 $ hg init repo
6 $ hg init repo
7 $ cd repo
7 $ cd repo
8 $ echo foo > foo
8 $ echo foo > foo
9 $ echo bar > bar
9 $ echo bar > bar
10 $ hg commit -q -Am boo
10 $ hg commit -q -Am boo
11 $ hg clone --pull . ../gdrepo -q --config format.generaldelta=yes
11 $ hg clone --pull . ../gdrepo -q --config format.generaldelta=yes
12 $ for r in 1 2 3; do
12 $ for r in 1 2 3; do
13 > echo $r > foo
13 > echo $r > foo
14 > hg commit -q -m $r
14 > hg commit -q -m $r
15 > hg up -q -r 0
15 > hg up -q -r 0
16 > hg pull . -q -r $r -R ../gdrepo
16 > hg pull . -q -r $r -R ../gdrepo
17 > done
17 > done
18
18
19 $ cd ..
19 $ cd ..
20 >>> import os
20 >>> import os
21 >>> regsize = os.stat("repo/.hg/store/00manifest.i").st_size
21 >>> regsize = os.stat("repo/.hg/store/00manifest.i").st_size
22 >>> gdsize = os.stat("gdrepo/.hg/store/00manifest.i").st_size
22 >>> gdsize = os.stat("gdrepo/.hg/store/00manifest.i").st_size
23 >>> if regsize < gdsize:
23 >>> if regsize < gdsize:
24 ... print 'generaldata increased size of manifest'
24 ... print 'generaldata increased size of manifest'
25
25
26 Verify rev reordering doesn't create invalid bundles (issue4462)
26 Verify rev reordering doesn't create invalid bundles (issue4462)
27 This requires a commit tree that when pulled will reorder manifest revs such
27 This requires a commit tree that when pulled will reorder manifest revs such
28 that the second manifest to create a file rev will be ordered before the first
28 that the second manifest to create a file rev will be ordered before the first
29 manifest to create that file rev. We also need to do a partial pull to ensure
29 manifest to create that file rev. We also need to do a partial pull to ensure
30 reordering happens. At the end we verify the linkrev points at the earliest
30 reordering happens. At the end we verify the linkrev points at the earliest
31 commit.
31 commit.
32
32
33 $ hg init server --config format.generaldelta=True
33 $ hg init server --config format.generaldelta=True
34 $ cd server
34 $ cd server
35 $ touch a
35 $ touch a
36 $ hg commit -Aqm a
36 $ hg commit -Aqm a
37 $ echo x > x
37 $ echo x > x
38 $ echo y > y
38 $ echo y > y
39 $ hg commit -Aqm xy
39 $ hg commit -Aqm xy
40 $ hg up -q '.^'
40 $ hg up -q '.^'
41 $ echo x > x
41 $ echo x > x
42 $ echo z > z
42 $ echo z > z
43 $ hg commit -Aqm xz
43 $ hg commit -Aqm xz
44 $ hg up -q 1
44 $ hg up -q 1
45 $ echo b > b
45 $ echo b > b
46 $ hg commit -Aqm b
46 $ hg commit -Aqm b
47 $ hg merge -q 2
47 $ hg merge -q 2
48 $ hg commit -Aqm merge
48 $ hg commit -Aqm merge
49 $ echo c > c
49 $ echo c > c
50 $ hg commit -Aqm c
50 $ hg commit -Aqm c
51 $ hg log -G -T '{rev} {shortest(node)} {desc}'
51 $ hg log -G -T '{rev} {shortest(node)} {desc}'
52 @ 5 ebb8 c
52 @ 5 ebb8 c
53 |
53 |
54 o 4 baf7 merge
54 o 4 baf7 merge
55 |\
55 |\
56 | o 3 a129 b
56 | o 3 a129 b
57 | |
57 | |
58 o | 2 958c xz
58 o | 2 958c xz
59 | |
59 | |
60 | o 1 f00c xy
60 | o 1 f00c xy
61 |/
61 |/
62 o 0 3903 a
62 o 0 3903 a
63
63
64 $ cd ..
64 $ cd ..
65 $ hg init client
65 $ hg init client
66 $ cd client
66 $ cd client
67 $ hg pull -q ../server -r 4
67 $ hg pull -q ../server -r 4
68 $ hg debugindex x
68 $ hg debugindex x
69 rev offset length base linkrev nodeid p1 p2
69 rev offset length base linkrev nodeid p1 p2
70 0 0 3 0 1 1406e7411862 000000000000 000000000000
70 0 0 3 0 1 1406e7411862 000000000000 000000000000
71
71
72 $ cd ..
72 $ cd ..
73
73
74 Test format.aggressivemergedeltas
74 Test format.aggressivemergedeltas
75
75
76 $ hg init --config format.generaldelta=1 aggressive
76 $ hg init --config format.generaldelta=1 aggressive
77 $ cd aggressive
77 $ cd aggressive
78 $ touch a b c d e
78 $ touch a b c d e
79 $ hg commit -Aqm side1
79 $ hg commit -Aqm side1
80 $ hg up -q null
80 $ hg up -q null
81 $ touch x y
81 $ touch x y
82 $ hg commit -Aqm side2
82 $ hg commit -Aqm side2
83
83
84 - Verify non-aggressive merge uses p1 (commit 1) as delta parent
84 - Verify non-aggressive merge uses p1 (commit 1) as delta parent
85 $ hg merge -q 0
85 $ hg merge -q 0
86 $ hg commit -q -m merge
86 $ hg commit -q -m merge
87 $ hg debugindex -m
87 $ hg debugindex -m
88 rev offset length delta linkrev nodeid p1 p2
88 rev offset length delta linkrev nodeid p1 p2
89 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
89 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
90 1 59 59 -1 1 315c023f341d 000000000000 000000000000
90 1 59 59 -1 1 315c023f341d 000000000000 000000000000
91 2 118 65 1 2 2ab389a983eb 315c023f341d 8dde941edb6e
91 2 118 65 1 2 2ab389a983eb 315c023f341d 8dde941edb6e
92
92
93 $ hg strip -q -r . --config extensions.strip=
93 $ hg strip -q -r . --config extensions.strip=
94
94
95 - Verify aggressive merge uses p2 (commit 0) as delta parent
95 - Verify aggressive merge uses p2 (commit 0) as delta parent
96 $ hg up -q -C 1
96 $ hg up -q -C 1
97 $ hg merge -q 0
97 $ hg merge -q 0
98 $ hg commit -q -m merge --config format.aggressivemergedeltas=True
98 $ hg commit -q -m merge --config format.aggressivemergedeltas=True
99 $ hg debugindex -m
99 $ hg debugindex -m
100 rev offset length delta linkrev nodeid p1 p2
100 rev offset length delta linkrev nodeid p1 p2
101 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
101 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
102 1 59 59 -1 1 315c023f341d 000000000000 000000000000
102 1 59 59 -1 1 315c023f341d 000000000000 000000000000
103 2 118 62 0 2 2ab389a983eb 315c023f341d 8dde941edb6e
103 2 118 62 0 2 2ab389a983eb 315c023f341d 8dde941edb6e
104
104
105 Test that strip bundle uses bundle2
105 Test that strip bundle uses bundle2
106 $ hg --config extensions.strip= strip .
106 $ hg --config extensions.strip= strip .
107 0 files updated, 0 files merged, 5 files removed, 0 files unresolved
107 0 files updated, 0 files merged, 5 files removed, 0 files unresolved
108 saved backup bundle to $TESTTMP/aggressive/.hg/strip-backup/1c5d4dc9a8b8-6c68e60c-backup.hg (glob)
108 saved backup bundle to $TESTTMP/aggressive/.hg/strip-backup/1c5d4dc9a8b8-6c68e60c-backup.hg (glob)
109 $ hg debugbundle .hg/strip-backup/*
109 $ hg debugbundle .hg/strip-backup/*
110 Stream params: {}
110 Stream params: {'Compression': 'BZ'}
111 changegroup -- "{'version': '02'}"
111 changegroup -- "{'version': '02'}"
112 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9
112 1c5d4dc9a8b8d6e1750966d343e94db665e7a1e9
113
113
114 $ cd ..
114 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now