##// END OF EJS Templates
strip: factor out revset calculation for strip -B...
Ryan McElroy -
r26624:bcace0fb default
parent child Browse files
Show More
@@ -1,225 +1,222
1 """strip changesets and their descendants from history
1 """strip changesets and their descendants from history
2
2
3 This extension allows you to strip changesets and all their descendants from the
3 This extension allows you to strip changesets and all their descendants from the
4 repository. See the command help for details.
4 repository. See the command help for details.
5 """
5 """
6 from mercurial.i18n import _
6 from mercurial.i18n import _
7 from mercurial.node import nullid
7 from mercurial.node import nullid
8 from mercurial.lock import release
8 from mercurial.lock import release
9 from mercurial import cmdutil, hg, scmutil, util, error
9 from mercurial import cmdutil, hg, scmutil, util, error
10 from mercurial import repair, bookmarks, merge
10 from mercurial import repair, bookmarks, merge
11
11
# command registration table for this extension; populated by the
# @command decorator below
cmdtable = {}
command = cmdutil.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'internal' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'internal'
19
19
def checksubstate(repo, baserev=None):
    '''return list of subrepos at a different revision than substate.
    Abort if any subrepos have uncommitted changes.'''
    wctx = repo[None]
    # compare against baserev when given, else the first working parent
    bctx = repo[baserev] if baserev else wctx.parents()[0]
    changed = []
    for sub in sorted(wctx.substate):
        # abort immediately if the subrepo itself is dirty
        wctx.sub(sub).bailifchanged(True)
        if sub not in bctx.substate or bctx.sub(sub).dirty():
            changed.append(sub)
    return changed
34
34
def checklocalchanges(repo, force=False, excsuffix=''):
    """Abort on uncommitted (sub)repo changes unless force; return status."""
    cmdutil.checkunfinished(repo)
    status = repo.status()
    if force:
        return status
    dirty = (status.modified or status.added
             or status.removed or status.deleted)
    if dirty:
        _("local changes found") # i18n tool detection
        raise error.Abort(_("local changes found" + excsuffix))
    if checksubstate(repo):
        _("local changed subrepos found") # i18n tool detection
        raise error.Abort(_("local changed subrepos found" + excsuffix))
    return status
46
46
def strip(ui, repo, revs, update=True, backup=True, force=None, bookmark=None):
    """Strip revs from repo under the working-dir and store locks.

    Optionally updates the working directory away from the stripped
    revisions first, and deletes the given bookmark afterwards.
    """
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        if update:
            checklocalchanges(repo, force=force)
            urev, p2 = repo.changelog.parents(revs[0])
            # with mq, prefer updating to the second parent when it is an
            # applied patch
            if util.safehasattr(repo, 'mq') and p2 != nullid:
                if p2 in [entry.node for entry in repo.mq.applied]:
                    urev = p2
            hg.clean(repo, urev)
            repo.dirstate.write()

        repair.strip(ui, repo, revs, backup)

        marks = repo._bookmarks
        if bookmark:
            if bookmark == repo._activebookmark:
                bookmarks.deactivate(repo)
            del marks[bookmark]
            marks.write()
            ui.write(_("bookmark '%s' deleted\n") % bookmark)
    finally:
        release(lock, wlock)
74
74
75
75
@command("strip",
         [
          ('r', 'rev', [], _('strip specified revision (optional, '
                             'can specify revisions without this '
                             'option)'), _('REV')),
          ('f', 'force', None, _('force removal of changesets, discard '
                                 'uncommitted changes (no backup)')),
          ('', 'no-backup', None, _('no backups')),
          ('', 'nobackup', None, _('no backups (DEPRECATED)')),
          ('n', '', None, _('ignored  (DEPRECATED)')),
          ('k', 'keep', None, _("do not modify working directory during "
                                "strip")),
          ('B', 'bookmark', '', _("remove revs only reachable from given"
                                  " bookmark"))],
          _('hg strip [-k] [-f] [-n] [-B bookmark] [-r] REV...'))
def stripcmd(ui, repo, *revs, **opts):
    """strip changesets and all their descendants from the repository

    The strip command removes the specified changesets and all their
    descendants. If the working directory has uncommitted changes, the
    operation is aborted unless the --force flag is supplied, in which
    case changes will be discarded.

    If a parent of the working directory is stripped, then the working
    directory will automatically be updated to the most recent
    available ancestor of the stripped parent after the operation
    completes.

    Any stripped changesets are stored in ``.hg/strip-backup`` as a
    bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
    be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
    where BUNDLE is the bundle file created by the strip. Note that
    the local revision numbers will in general be different after the
    restore.

    Use the --no-backup option to discard the backup bundle once the
    operation completes.

    Strip is not a history-rewriting operation and can be used on
    changesets in the public phase. But if the stripped changesets have
    been pushed to a remote repository you will likely pull them again.

    Return 0 on success.
    """
    backup = True
    # --no-backup is canonical; --nobackup kept for compatibility
    if opts.get('no_backup') or opts.get('nobackup'):
        backup = False

    cl = repo.changelog
    # revisions may come as positional args and/or via -r
    revs = list(revs) + opts.get('rev')
    revs = set(scmutil.revrange(repo, revs))

    wlock = repo.wlock()
    try:
        if opts.get('bookmark'):
            mark = opts.get('bookmark')
            marks = repo._bookmarks
            if mark not in marks:
                raise error.Abort(_("bookmark '%s' not found") % mark)

            # If the requested bookmark is not the only one pointing to a
            # a revision we have to only delete the bookmark and not strip
            # anything. revsets cannot detect that case.
            uniquebm = True
            for m, n in marks.iteritems():
                if m != mark and n == repo[mark].node():
                    uniquebm = False
                    break
            if uniquebm:
                rsrevs = repair.stripbmrevset(repo, mark)
                revs.update(set(rsrevs))
            if not revs:
                # nothing to strip: just remove the bookmark itself
                del marks[mark]
                marks.write()
                ui.write(_("bookmark '%s' deleted\n") % mark)

        if not revs:
            raise error.Abort(_('empty revision set'))

        descendants = set(cl.descendants(revs))
        strippedrevs = revs.union(descendants)
        roots = revs.difference(descendants)

        update = False
        # if one of the wdir parent is stripped we'll need
        # to update away to an earlier revision
        for p in repo.dirstate.parents():
            if p != nullid and cl.rev(p) in strippedrevs:
                update = True
                break

        rootnodes = set(cl.node(r) for r in roots)

        q = getattr(repo, 'mq', None)
        if q is not None and q.applied:
            # refresh queue state if we're about to strip
            # applied patches
            if cl.rev(repo.lookup('qtip')) in strippedrevs:
                q.applieddirty = True
                start = 0
                end = len(q.applied)
                for i, statusentry in enumerate(q.applied):
                    if statusentry.node in rootnodes:
                        # if one of the stripped roots is an applied
                        # patch, only part of the queue is stripped
                        start = i
                        break
                del q.applied[start:end]
                q.savedirty()

        # from here on, "revs" holds the nodes of the strip roots
        revs = sorted(rootnodes)
        if update and opts.get('keep'):
            urev, p2 = repo.changelog.parents(revs[0])
            # with mq, prefer the second parent when it is an applied patch
            if (util.safehasattr(repo, 'mq') and p2 != nullid
                and p2 in [x.node for x in repo.mq.applied]):
                urev = p2
            uctx = repo[urev]

            # only reset the dirstate for files that would actually change
            # between the working context and uctx
            descendantrevs = repo.revs("%s::." % uctx.rev())
            changedfiles = []
            for rev in descendantrevs:
                # blindly reset the files, regardless of what actually changed
                changedfiles.extend(repo[rev].files())

            # reset files that only changed in the dirstate too
            dirstate = repo.dirstate
            dirchanges = [f for f in dirstate if dirstate[f] != 'n']
            changedfiles.extend(dirchanges)

            repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles)
            repo.dirstate.write()

            # clear resolve state
            ms = merge.mergestate(repo)
            ms.reset(repo['.'].node())

            # with --keep the working directory is left alone
            update = False

        strip(ui, repo, revs, backup=backup, update=update,
              force=opts.get('force'), bookmark=opts.get('bookmark'))
    finally:
        wlock.release()

    return 0
@@ -1,301 +1,313
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import short
14 from .node import short
15 from . import (
15 from . import (
16 bundle2,
16 bundle2,
17 changegroup,
17 changegroup,
18 error,
18 error,
19 exchange,
19 exchange,
20 util,
20 util,
21 )
21 )
22
22
def _bundle(repo, bases, heads, node, suffix, compress=True):
    """create a bundle with the specified revisions as a backup"""
    # generaldelta repos require the newer changegroup format
    if 'generaldelta' in repo.requirements:
        cgversion = '02'
    else:
        cgversion = '01'

    cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
                                       version=cgversion)
    vfs = repo.vfs
    backupdir = "strip-backup"
    if not vfs.isdir(backupdir):
        vfs.mkdir(backupdir)

    # Include a hash of all the nodes in the filename for uniqueness
    hexhashes = sorted(ctx.hex()
                       for ctx in repo.set('%ln::%ln', bases, heads))
    totalhash = util.sha1(''.join(hexhashes)).hexdigest()
    name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)

    comp = None
    if cgversion == '01':
        bundletype = "HG10BZ" if compress else "HG10UN"
    else:
        bundletype = "HG20"
        if compress:
            comp = 'BZ'
    return changegroup.writebundle(repo.ui, cg, name, bundletype, vfs,
                                   compression=comp)
53
53
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    affected = set()
    # every file touched at or after striprev lives in an affected filelog
    for rev in xrange(striprev, len(repo)):
        affected.update(repo[rev].files())
    return sorted(affected)
62
62
def _collectbrokencsets(repo, files, striprev):
    """return the changesets which will be broken by the truncation"""
    broken = set()

    def collectone(revlog):
        # gather linkrevs of revisions this revlog loses at striprev
        strippoint, brokenset = revlog.getstrippoint(striprev)
        del strippoint  # only the broken set matters here
        broken.update(revlog.linkrev(r) for r in brokenset)

    collectone(repo.manifest)
    for fname in files:
        collectone(repo.file(fname))
    return broken
75
75
def strip(ui, repo, nodelist, backup=True, topic='backup'):
    """remove the revisions in nodelist and their descendants from repo

    A backup bundle of the stripped revisions is written to
    .hg/strip-backup unless backup is falsy. Must NOT be called from
    inside a transaction. Caller is expected to hold the repo lock.
    """
    # Simple way to maintain backwards compatibility for this
    # argument.
    if backup in ['none', 'strip']:
        backup = False

    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
    # is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget.first()].node()
    else:
        # no surviving parent: bookmarks move to the working parent
        newbmtarget = '.'

    bm = repo._bookmarks
    # bookmarks that point into the stripped set must be relocated
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    vfs = repo.vfs
    node = nodelist[-1]
    if backup:
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") %
                       vfs.join(backupfile))
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    vfs.join(backupfile))
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=False)

    mfst = repo.manifest

    curtr = repo.currenttransaction()
    if curtr is not None:
        del curtr # avoid carrying reference to transaction for nothing
        msg = _('programming error: cannot strip from inside a transaction')
        raise error.Abort(msg, hint=_('contact your extension maintainer'))

    tr = repo.transaction("strip")
    # entries added before the strip belong to earlier operations; only
    # truncate files touched from this offset on
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.svfs(file, 'a').truncate(troffset)
                if troffset == 0:
                    repo.store.markremoved(file)
            tr.close()
        finally:
            tr.release()

        if saveheads or savebases:
            # re-apply the revisions we saved aside before truncating
            ui.note(_("adding branch\n"))
            f = vfs.open(chgrpfile, "rb")
            gen = exchange.readbundle(ui, f, chgrpfile, vfs)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            if isinstance(gen, bundle2.unbundle20):
                tr = repo.transaction('strip')
                tr.hookargs = {'source': 'strip',
                               'url': 'bundle:' + vfs.join(chgrpfile)}
                try:
                    bundle2.processbundle(repo, gen, lambda: tr)
                    tr.close()
                finally:
                    tr.release()
            else:
                changegroup.addchangegroup(repo, gen, 'strip',
                                           'bundle:' + vfs.join(chgrpfile),
                                           True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()

        # remove undo files
        for undovfs, undofile in repo.undofiles():
            try:
                undovfs.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n') %
                            (undovfs.join(undofile), str(e)))

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        bm.write()
    except: # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % vfs.join(backupfile))
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % vfs.join(chgrpfile))
        raise
    else:
        if saveheads or savebases:
            # Remove partial backup only if there were no exceptions
            vfs.unlink(chgrpfile)

    repo.destroyed()
233
233
def rebuildfncache(ui, repo):
    """Rebuilds the fncache file from repo history.

    Missing entries will be added. Extra entries will be removed.
    """
    repo = repo.unfiltered()

    if 'fncache' not in repo.requirements:
        ui.warn(_('(not rebuilding fncache because repository does not '
                  'support fncache)\n'))
        return

    lock = repo.lock()
    try:
        fnc = repo.store.fncache
        # Trigger load of fncache.
        if 'irrelevant' in fnc:
            pass

        oldentries = set(fnc.entries)
        newentries = set()
        seenfiles = set()

        repolen = len(repo)
        for rev in repo:
            ui.progress(_('changeset'), rev, total=repolen)

            ctx = repo[rev]
            for f in ctx.files():
                # This is to minimize I/O.
                if f in seenfiles:
                    continue
                seenfiles.add(f)

                # candidate revlog index/data paths for this file
                i = 'data/%s.i' % f
                d = 'data/%s.d' % f

                if repo.store._exists(i):
                    newentries.add(i)
                if repo.store._exists(d):
                    newentries.add(d)

        ui.progress(_('changeset'), None)

        addcount = len(newentries - oldentries)
        removecount = len(oldentries - newentries)
        for p in sorted(oldentries - newentries):
            ui.write(_('removing %s\n') % p)
        for p in sorted(newentries - oldentries):
            ui.write(_('adding %s\n') % p)

        if addcount or removecount:
            ui.write(_('%d items added, %d removed from fncache\n') %
                     (addcount, removecount))
            fnc.entries = newentries
            fnc._dirty = True

            # persist the rebuilt cache inside a transaction
            tr = repo.transaction('fncache')
            try:
                fnc.write(tr)
                tr.close()
            finally:
                tr.release()
        else:
            ui.write(_('fncache already up to date\n'))
    finally:
        lock.release()
301
301
def stripbmrevset(repo, mark):
    """
    The revset to strip when strip is called with -B mark

    Needs to live here so extensions can use it and wrap it even when strip is
    not enabled or not present on a box.
    """
    # revisions reachable only from the bookmark: its ancestors, minus the
    # ancestors of every other head and of every other bookmark
    query = ("ancestors(bookmark(%s)) - "
             "ancestors(head() and not bookmark(%s)) - "
             "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(query, mark, mark, mark)
313
General Comments 0
You need to be logged in to leave comments. Login now