##// END OF EJS Templates
spelling: fix minor spell checker issues
Mads Kiilerich -
r17738:b8424c92 default
parent child Browse files
Show More
@@ -1,48 +1,48
1 """Extension to verify locks are obtained in the required places.
1 """Extension to verify locks are obtained in the required places.
2
2
3 This works by wrapping functions that should be surrounded by a lock
3 This works by wrapping functions that should be surrounded by a lock
4 and asserting the lock is held. Missing locks are called out with a
4 and asserting the lock is held. Missing locks are called out with a
5 traceback printed to stderr.
5 traceback printed to stderr.
6
6
7 This currently only checks store locks, not working copy locks.
7 This currently only checks store locks, not working copy locks.
8 """
8 """
9 import os
9 import os
10 import traceback
10 import traceback
11
11
12 def _warnstack(ui, msg, skip=1):
12 def _warnstack(ui, msg, skip=1):
13 '''issue warning with the message and the current stack, skipping the
13 '''issue warning with the message and the current stack, skipping the
14 skip last entries'''
14 skip last entries'''
15 ui.warn('%s at:\n' % msg)
15 ui.warn('%s at:\n' % msg)
16 entries = traceback.extract_stack()[:-skip]
16 entries = traceback.extract_stack()[:-skip]
17 fnmax = max(len(entry[0]) for entry in entries)
17 fnmax = max(len(entry[0]) for entry in entries)
18 for fn, ln, func, _text in entries:
18 for fn, ln, func, _text in entries:
19 ui.warn(' %*s:%-4s in %s\n' % (fnmax, fn, ln, func))
19 ui.warn(' %*s:%-4s in %s\n' % (fnmax, fn, ln, func))
20
20
21 def _checklock(repo):
21 def _checklock(repo):
22 l = repo._lockref and repo._lockref()
22 l = repo._lockref and repo._lockref()
23 if l is None or not l.held:
23 if l is None or not l.held:
24 _warnstack(repo.ui, 'missing lock', skip=2)
24 _warnstack(repo.ui, 'missing lock', skip=2)
25
25
26 def reposetup(ui, repo):
26 def reposetup(ui, repo):
27 orig = repo.__class__
27 orig = repo.__class__
28 class lockcheckrepo(repo.__class__):
28 class lockcheckrepo(repo.__class__):
29 def _writejournal(self, *args, **kwargs):
29 def _writejournal(self, *args, **kwargs):
30 _checklock(self)
30 _checklock(self)
31 return orig._writejournal(self, *args, **kwargs)
31 return orig._writejournal(self, *args, **kwargs)
32
32
33 def transaction(self, *args, **kwargs):
33 def transaction(self, *args, **kwargs):
34 _checklock(self)
34 _checklock(self)
35 return orig.transaction(self, *args, **kwargs)
35 return orig.transaction(self, *args, **kwargs)
36
36
37 # TODO(durin42): kiilerix had a commented-out lock check in
37 # TODO(durin42): kiilerix had a commented-out lock check in
38 # writebranchcache and _writerequirements
38 # _writebranchcache and _writerequirements
39
39
40 def _tag(self, *args, **kwargs):
40 def _tag(self, *args, **kwargs):
41 _checklock(self)
41 _checklock(self)
42 return orig._tag(self, *args, **kwargs)
42 return orig._tag(self, *args, **kwargs)
43
43
44 def write(self, *args, **kwargs):
44 def write(self, *args, **kwargs):
45 assert os.path.lexists(self._join('.hg/wlock'))
45 assert os.path.lexists(self._join('.hg/wlock'))
46 return orig.write(self, *args, **kwargs)
46 return orig.write(self, *args, **kwargs)
47
47
48 repo.__class__ = lockcheckrepo
48 repo.__class__ = lockcheckrepo
@@ -1,743 +1,743
1 # histedit.py - interactive history editing for mercurial
1 # histedit.py - interactive history editing for mercurial
2 #
2 #
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """interactive history editing
7 """interactive history editing
8
8
9 With this extension installed, Mercurial gains one new command: histedit. Usage
9 With this extension installed, Mercurial gains one new command: histedit. Usage
10 is as follows, assuming the following history::
10 is as follows, assuming the following history::
11
11
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
13 | Add delta
13 | Add delta
14 |
14 |
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
16 | Add gamma
16 | Add gamma
17 |
17 |
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
19 | Add beta
19 | Add beta
20 |
20 |
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
22 Add alpha
22 Add alpha
23
23
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
25 file open in your editor::
25 file open in your editor::
26
26
27 pick c561b4e977df Add beta
27 pick c561b4e977df Add beta
28 pick 030b686bedc4 Add gamma
28 pick 030b686bedc4 Add gamma
29 pick 7c2fd3b9020c Add delta
29 pick 7c2fd3b9020c Add delta
30
30
31 # Edit history between 633536316234 and 7c2fd3b9020c
31 # Edit history between 633536316234 and 7c2fd3b9020c
32 #
32 #
33 # Commands:
33 # Commands:
34 # p, pick = use commit
34 # p, pick = use commit
35 # e, edit = use commit, but stop for amending
35 # e, edit = use commit, but stop for amending
36 # f, fold = use commit, but fold into previous commit (combines N and N-1)
36 # f, fold = use commit, but fold into previous commit (combines N and N-1)
37 # d, drop = remove commit from history
37 # d, drop = remove commit from history
38 # m, mess = edit message without changing commit content
38 # m, mess = edit message without changing commit content
39 #
39 #
40
40
41 In this file, lines beginning with ``#`` are ignored. You must specify a rule
41 In this file, lines beginning with ``#`` are ignored. You must specify a rule
42 for each revision in your history. For example, if you had meant to add gamma
42 for each revision in your history. For example, if you had meant to add gamma
43 before beta, and then wanted to add delta in the same revision as beta, you
43 before beta, and then wanted to add delta in the same revision as beta, you
44 would reorganize the file to look like this::
44 would reorganize the file to look like this::
45
45
46 pick 030b686bedc4 Add gamma
46 pick 030b686bedc4 Add gamma
47 pick c561b4e977df Add beta
47 pick c561b4e977df Add beta
48 fold 7c2fd3b9020c Add delta
48 fold 7c2fd3b9020c Add delta
49
49
50 # Edit history between 633536316234 and 7c2fd3b9020c
50 # Edit history between 633536316234 and 7c2fd3b9020c
51 #
51 #
52 # Commands:
52 # Commands:
53 # p, pick = use commit
53 # p, pick = use commit
54 # e, edit = use commit, but stop for amending
54 # e, edit = use commit, but stop for amending
55 # f, fold = use commit, but fold into previous commit (combines N and N-1)
55 # f, fold = use commit, but fold into previous commit (combines N and N-1)
56 # d, drop = remove commit from history
56 # d, drop = remove commit from history
57 # m, mess = edit message without changing commit content
57 # m, mess = edit message without changing commit content
58 #
58 #
59
59
60 At which point you close the editor and ``histedit`` starts working. When you
60 At which point you close the editor and ``histedit`` starts working. When you
61 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
61 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
62 those revisions together, offering you a chance to clean up the commit message::
62 those revisions together, offering you a chance to clean up the commit message::
63
63
64 Add beta
64 Add beta
65 ***
65 ***
66 Add delta
66 Add delta
67
67
68 Edit the commit message to your liking, then close the editor. For
68 Edit the commit message to your liking, then close the editor. For
69 this example, let's assume that the commit message was changed to
69 this example, let's assume that the commit message was changed to
70 ``Add beta and delta.`` After histedit has run and had a chance to
70 ``Add beta and delta.`` After histedit has run and had a chance to
71 remove any old or temporary revisions it needed, the history looks
71 remove any old or temporary revisions it needed, the history looks
72 like this::
72 like this::
73
73
74 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
74 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
75 | Add beta and delta.
75 | Add beta and delta.
76 |
76 |
77 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
77 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
78 | Add gamma
78 | Add gamma
79 |
79 |
80 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
80 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
81 Add alpha
81 Add alpha
82
82
83 Note that ``histedit`` does *not* remove any revisions (even its own temporary
83 Note that ``histedit`` does *not* remove any revisions (even its own temporary
84 ones) until after it has completed all the editing operations, so it will
84 ones) until after it has completed all the editing operations, so it will
85 probably perform several strip operations when it's done. For the above example,
85 probably perform several strip operations when it's done. For the above example,
86 it had to run strip twice. Strip can be slow depending on a variety of factors,
86 it had to run strip twice. Strip can be slow depending on a variety of factors,
87 so you might need to be a little patient. You can choose to keep the original
87 so you might need to be a little patient. You can choose to keep the original
88 revisions by passing the ``--keep`` flag.
88 revisions by passing the ``--keep`` flag.
89
89
90 The ``edit`` operation will drop you back to a command prompt,
90 The ``edit`` operation will drop you back to a command prompt,
91 allowing you to edit files freely, or even use ``hg record`` to commit
91 allowing you to edit files freely, or even use ``hg record`` to commit
92 some changes as a separate commit. When you're done, any remaining
92 some changes as a separate commit. When you're done, any remaining
93 uncommitted changes will be committed as well. When done, run ``hg
93 uncommitted changes will be committed as well. When done, run ``hg
94 histedit --continue`` to finish this step. You'll be prompted for a
94 histedit --continue`` to finish this step. You'll be prompted for a
95 new commit message, but the default commit message will be the
95 new commit message, but the default commit message will be the
96 original message for the ``edit`` ed revision.
96 original message for the ``edit`` ed revision.
97
97
98 The ``message`` operation will give you a chance to revise a commit
98 The ``message`` operation will give you a chance to revise a commit
99 message without changing the contents. It's a shortcut for doing
99 message without changing the contents. It's a shortcut for doing
100 ``edit`` immediately followed by `hg histedit --continue``.
100 ``edit`` immediately followed by `hg histedit --continue``.
101
101
102 If ``histedit`` encounters a conflict when moving a revision (while
102 If ``histedit`` encounters a conflict when moving a revision (while
103 handling ``pick`` or ``fold``), it'll stop in a similar manner to
103 handling ``pick`` or ``fold``), it'll stop in a similar manner to
104 ``edit`` with the difference that it won't prompt you for a commit
104 ``edit`` with the difference that it won't prompt you for a commit
105 message when done. If you decide at this point that you don't like how
105 message when done. If you decide at this point that you don't like how
106 much work it will be to rearrange history, or that you made a mistake,
106 much work it will be to rearrange history, or that you made a mistake,
107 you can use ``hg histedit --abort`` to abandon the new changes you
107 you can use ``hg histedit --abort`` to abandon the new changes you
108 have made and return to the state before you attempted to edit your
108 have made and return to the state before you attempted to edit your
109 history.
109 history.
110
110
111 If we clone the example repository above and add three more changes, such that
111 If we clone the example repository above and add three more changes, such that
112 we have the following history::
112 we have the following history::
113
113
114 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
114 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
115 | Add theta
115 | Add theta
116 |
116 |
117 o 5 140988835471 2009-04-27 18:04 -0500 stefan
117 o 5 140988835471 2009-04-27 18:04 -0500 stefan
118 | Add eta
118 | Add eta
119 |
119 |
120 o 4 122930637314 2009-04-27 18:04 -0500 stefan
120 o 4 122930637314 2009-04-27 18:04 -0500 stefan
121 | Add zeta
121 | Add zeta
122 |
122 |
123 o 3 836302820282 2009-04-27 18:04 -0500 stefan
123 o 3 836302820282 2009-04-27 18:04 -0500 stefan
124 | Add epsilon
124 | Add epsilon
125 |
125 |
126 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
126 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
127 | Add beta and delta.
127 | Add beta and delta.
128 |
128 |
129 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
129 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
130 | Add gamma
130 | Add gamma
131 |
131 |
132 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
132 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
133 Add alpha
133 Add alpha
134
134
135 If you run ``hg histedit --outgoing`` on the clone then it is the same
135 If you run ``hg histedit --outgoing`` on the clone then it is the same
136 as running ``hg histedit 836302820282``. If you need plan to push to a
136 as running ``hg histedit 836302820282``. If you need plan to push to a
137 repository that Mercurial does not detect to be related to the source
137 repository that Mercurial does not detect to be related to the source
138 repo, you can add a ``--force`` option.
138 repo, you can add a ``--force`` option.
139 """
139 """
140
140
141 try:
141 try:
142 import cPickle as pickle
142 import cPickle as pickle
143 except ImportError:
143 except ImportError:
144 import pickle
144 import pickle
145 import os
145 import os
146
146
147 from mercurial import bookmarks
147 from mercurial import bookmarks
148 from mercurial import cmdutil
148 from mercurial import cmdutil
149 from mercurial import discovery
149 from mercurial import discovery
150 from mercurial import error
150 from mercurial import error
151 from mercurial import copies
151 from mercurial import copies
152 from mercurial import context
152 from mercurial import context
153 from mercurial import hg
153 from mercurial import hg
154 from mercurial import lock as lockmod
154 from mercurial import lock as lockmod
155 from mercurial import node
155 from mercurial import node
156 from mercurial import repair
156 from mercurial import repair
157 from mercurial import scmutil
157 from mercurial import scmutil
158 from mercurial import util
158 from mercurial import util
159 from mercurial import merge as mergemod
159 from mercurial import merge as mergemod
160 from mercurial.i18n import _
160 from mercurial.i18n import _
161
161
162 cmdtable = {}
162 cmdtable = {}
163 command = cmdutil.command(cmdtable)
163 command = cmdutil.command(cmdtable)
164
164
165 testedwith = 'internal'
165 testedwith = 'internal'
166
166
167 # i18n: command names and abbreviations must remain untranslated
167 # i18n: command names and abbreviations must remain untranslated
168 editcomment = _("""# Edit history between %s and %s
168 editcomment = _("""# Edit history between %s and %s
169 #
169 #
170 # Commands:
170 # Commands:
171 # p, pick = use commit
171 # p, pick = use commit
172 # e, edit = use commit, but stop for amending
172 # e, edit = use commit, but stop for amending
173 # f, fold = use commit, but fold into previous commit (combines N and N-1)
173 # f, fold = use commit, but fold into previous commit (combines N and N-1)
174 # d, drop = remove commit from history
174 # d, drop = remove commit from history
175 # m, mess = edit message without changing commit content
175 # m, mess = edit message without changing commit content
176 #
176 #
177 """)
177 """)
178
178
179 def applychanges(ui, repo, ctx, opts):
179 def applychanges(ui, repo, ctx, opts):
180 """Merge changeset from ctx (only) in the current working directory"""
180 """Merge changeset from ctx (only) in the current working directory"""
181 wcpar = repo.dirstate.parents()[0]
181 wcpar = repo.dirstate.parents()[0]
182 if ctx.p1().node() == wcpar:
182 if ctx.p1().node() == wcpar:
183 # edition ar "in place" we do not need to make any merge,
183 # edition ar "in place" we do not need to make any merge,
184 # just applies changes on parent for edition
184 # just applies changes on parent for edition
185 cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True)
185 cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True)
186 stats = None
186 stats = None
187 else:
187 else:
188 try:
188 try:
189 # ui.forcemerge is an internal variable, do not document
189 # ui.forcemerge is an internal variable, do not document
190 repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
190 repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
191 stats = mergemod.update(repo, ctx.node(), True, True, False,
191 stats = mergemod.update(repo, ctx.node(), True, True, False,
192 ctx.p1().node())
192 ctx.p1().node())
193 finally:
193 finally:
194 repo.ui.setconfig('ui', 'forcemerge', '')
194 repo.ui.setconfig('ui', 'forcemerge', '')
195 repo.setparents(wcpar, node.nullid)
195 repo.setparents(wcpar, node.nullid)
196 repo.dirstate.write()
196 repo.dirstate.write()
197 # fix up dirstate for copies and renames
197 # fix up dirstate for copies and renames
198 cmdutil.duplicatecopies(repo, ctx.rev(), ctx.p1().rev())
198 cmdutil.duplicatecopies(repo, ctx.rev(), ctx.p1().rev())
199 return stats
199 return stats
200
200
201 def collapse(repo, first, last, commitopts):
201 def collapse(repo, first, last, commitopts):
202 """collapse the set of revisions from first to last as new one.
202 """collapse the set of revisions from first to last as new one.
203
203
204 Expected commit options are:
204 Expected commit options are:
205 - message
205 - message
206 - date
206 - date
207 - username
207 - username
208 Edition of commit message is trigered in all case.
208 Commit message is edited in all cases.
209
209
210 This function works in memory."""
210 This function works in memory."""
211 ctxs = list(repo.set('%d::%d', first, last))
211 ctxs = list(repo.set('%d::%d', first, last))
212 if not ctxs:
212 if not ctxs:
213 return None
213 return None
214 base = first.parents()[0]
214 base = first.parents()[0]
215
215
216 # commit a new version of the old changeset, including the update
216 # commit a new version of the old changeset, including the update
217 # collect all files which might be affected
217 # collect all files which might be affected
218 files = set()
218 files = set()
219 for ctx in ctxs:
219 for ctx in ctxs:
220 files.update(ctx.files())
220 files.update(ctx.files())
221
221
222 # Recompute copies (avoid recording a -> b -> a)
222 # Recompute copies (avoid recording a -> b -> a)
223 copied = copies.pathcopies(first, last)
223 copied = copies.pathcopies(first, last)
224
224
225 # prune files which were reverted by the updates
225 # prune files which were reverted by the updates
226 def samefile(f):
226 def samefile(f):
227 if f in last.manifest():
227 if f in last.manifest():
228 a = last.filectx(f)
228 a = last.filectx(f)
229 if f in base.manifest():
229 if f in base.manifest():
230 b = base.filectx(f)
230 b = base.filectx(f)
231 return (a.data() == b.data()
231 return (a.data() == b.data()
232 and a.flags() == b.flags())
232 and a.flags() == b.flags())
233 else:
233 else:
234 return False
234 return False
235 else:
235 else:
236 return f not in base.manifest()
236 return f not in base.manifest()
237 files = [f for f in files if not samefile(f)]
237 files = [f for f in files if not samefile(f)]
238 # commit version of these files as defined by head
238 # commit version of these files as defined by head
239 headmf = last.manifest()
239 headmf = last.manifest()
240 def filectxfn(repo, ctx, path):
240 def filectxfn(repo, ctx, path):
241 if path in headmf:
241 if path in headmf:
242 fctx = last[path]
242 fctx = last[path]
243 flags = fctx.flags()
243 flags = fctx.flags()
244 mctx = context.memfilectx(fctx.path(), fctx.data(),
244 mctx = context.memfilectx(fctx.path(), fctx.data(),
245 islink='l' in flags,
245 islink='l' in flags,
246 isexec='x' in flags,
246 isexec='x' in flags,
247 copied=copied.get(path))
247 copied=copied.get(path))
248 return mctx
248 return mctx
249 raise IOError()
249 raise IOError()
250
250
251 if commitopts.get('message'):
251 if commitopts.get('message'):
252 message = commitopts['message']
252 message = commitopts['message']
253 else:
253 else:
254 message = first.description()
254 message = first.description()
255 user = commitopts.get('user')
255 user = commitopts.get('user')
256 date = commitopts.get('date')
256 date = commitopts.get('date')
257 extra = first.extra()
257 extra = first.extra()
258
258
259 parents = (first.p1().node(), first.p2().node())
259 parents = (first.p1().node(), first.p2().node())
260 new = context.memctx(repo,
260 new = context.memctx(repo,
261 parents=parents,
261 parents=parents,
262 text=message,
262 text=message,
263 files=files,
263 files=files,
264 filectxfn=filectxfn,
264 filectxfn=filectxfn,
265 user=user,
265 user=user,
266 date=date,
266 date=date,
267 extra=extra)
267 extra=extra)
268 new._text = cmdutil.commitforceeditor(repo, new, [])
268 new._text = cmdutil.commitforceeditor(repo, new, [])
269 return repo.commitctx(new)
269 return repo.commitctx(new)
270
270
271 def pick(ui, repo, ctx, ha, opts):
271 def pick(ui, repo, ctx, ha, opts):
272 oldctx = repo[ha]
272 oldctx = repo[ha]
273 if oldctx.parents()[0] == ctx:
273 if oldctx.parents()[0] == ctx:
274 ui.debug('node %s unchanged\n' % ha)
274 ui.debug('node %s unchanged\n' % ha)
275 return oldctx, [], [], []
275 return oldctx, [], [], []
276 hg.update(repo, ctx.node())
276 hg.update(repo, ctx.node())
277 stats = applychanges(ui, repo, oldctx, opts)
277 stats = applychanges(ui, repo, oldctx, opts)
278 if stats and stats[3] > 0:
278 if stats and stats[3] > 0:
279 raise util.Abort(_('Fix up the change and run '
279 raise util.Abort(_('Fix up the change and run '
280 'hg histedit --continue'))
280 'hg histedit --continue'))
281 # drop the second merge parent
281 # drop the second merge parent
282 n = repo.commit(text=oldctx.description(), user=oldctx.user(),
282 n = repo.commit(text=oldctx.description(), user=oldctx.user(),
283 date=oldctx.date(), extra=oldctx.extra())
283 date=oldctx.date(), extra=oldctx.extra())
284 if n is None:
284 if n is None:
285 ui.warn(_('%s: empty changeset\n')
285 ui.warn(_('%s: empty changeset\n')
286 % node.hex(ha))
286 % node.hex(ha))
287 return ctx, [], [], []
287 return ctx, [], [], []
288 return repo[n], [n], [oldctx.node()], []
288 return repo[n], [n], [oldctx.node()], []
289
289
290
290
291 def edit(ui, repo, ctx, ha, opts):
291 def edit(ui, repo, ctx, ha, opts):
292 oldctx = repo[ha]
292 oldctx = repo[ha]
293 hg.update(repo, ctx.node())
293 hg.update(repo, ctx.node())
294 applychanges(ui, repo, oldctx, opts)
294 applychanges(ui, repo, oldctx, opts)
295 raise util.Abort(_('Make changes as needed, you may commit or record as '
295 raise util.Abort(_('Make changes as needed, you may commit or record as '
296 'needed now.\nWhen you are finished, run hg'
296 'needed now.\nWhen you are finished, run hg'
297 ' histedit --continue to resume.'))
297 ' histedit --continue to resume.'))
298
298
299 def fold(ui, repo, ctx, ha, opts):
299 def fold(ui, repo, ctx, ha, opts):
300 oldctx = repo[ha]
300 oldctx = repo[ha]
301 hg.update(repo, ctx.node())
301 hg.update(repo, ctx.node())
302 stats = applychanges(ui, repo, oldctx, opts)
302 stats = applychanges(ui, repo, oldctx, opts)
303 if stats and stats[3] > 0:
303 if stats and stats[3] > 0:
304 raise util.Abort(_('Fix up the change and run '
304 raise util.Abort(_('Fix up the change and run '
305 'hg histedit --continue'))
305 'hg histedit --continue'))
306 n = repo.commit(text='fold-temp-revision %s' % ha, user=oldctx.user(),
306 n = repo.commit(text='fold-temp-revision %s' % ha, user=oldctx.user(),
307 date=oldctx.date(), extra=oldctx.extra())
307 date=oldctx.date(), extra=oldctx.extra())
308 if n is None:
308 if n is None:
309 ui.warn(_('%s: empty changeset')
309 ui.warn(_('%s: empty changeset')
310 % node.hex(ha))
310 % node.hex(ha))
311 return ctx, [], [], []
311 return ctx, [], [], []
312 return finishfold(ui, repo, ctx, oldctx, n, opts, [])
312 return finishfold(ui, repo, ctx, oldctx, n, opts, [])
313
313
314 def finishfold(ui, repo, ctx, oldctx, newnode, opts, internalchanges):
314 def finishfold(ui, repo, ctx, oldctx, newnode, opts, internalchanges):
315 parent = ctx.parents()[0].node()
315 parent = ctx.parents()[0].node()
316 hg.update(repo, parent)
316 hg.update(repo, parent)
317 ### prepare new commit data
317 ### prepare new commit data
318 commitopts = opts.copy()
318 commitopts = opts.copy()
319 # username
319 # username
320 if ctx.user() == oldctx.user():
320 if ctx.user() == oldctx.user():
321 username = ctx.user()
321 username = ctx.user()
322 else:
322 else:
323 username = ui.username()
323 username = ui.username()
324 commitopts['user'] = username
324 commitopts['user'] = username
325 # commit message
325 # commit message
326 newmessage = '\n***\n'.join(
326 newmessage = '\n***\n'.join(
327 [ctx.description()] +
327 [ctx.description()] +
328 [repo[r].description() for r in internalchanges] +
328 [repo[r].description() for r in internalchanges] +
329 [oldctx.description()]) + '\n'
329 [oldctx.description()]) + '\n'
330 commitopts['message'] = newmessage
330 commitopts['message'] = newmessage
331 # date
331 # date
332 commitopts['date'] = max(ctx.date(), oldctx.date())
332 commitopts['date'] = max(ctx.date(), oldctx.date())
333 n = collapse(repo, ctx, repo[newnode], commitopts)
333 n = collapse(repo, ctx, repo[newnode], commitopts)
334 if n is None:
334 if n is None:
335 return ctx, [], [], []
335 return ctx, [], [], []
336 hg.update(repo, n)
336 hg.update(repo, n)
337 return repo[n], [n], [oldctx.node(), ctx.node()], [newnode]
337 return repo[n], [n], [oldctx.node(), ctx.node()], [newnode]
338
338
339 def drop(ui, repo, ctx, ha, opts):
339 def drop(ui, repo, ctx, ha, opts):
340 return ctx, [], [repo[ha].node()], []
340 return ctx, [], [repo[ha].node()], []
341
341
342
342
343 def message(ui, repo, ctx, ha, opts):
343 def message(ui, repo, ctx, ha, opts):
344 oldctx = repo[ha]
344 oldctx = repo[ha]
345 hg.update(repo, ctx.node())
345 hg.update(repo, ctx.node())
346 stats = applychanges(ui, repo, oldctx, opts)
346 stats = applychanges(ui, repo, oldctx, opts)
347 if stats and stats[3] > 0:
347 if stats and stats[3] > 0:
348 raise util.Abort(_('Fix up the change and run '
348 raise util.Abort(_('Fix up the change and run '
349 'hg histedit --continue'))
349 'hg histedit --continue'))
350 message = oldctx.description() + '\n'
350 message = oldctx.description() + '\n'
351 message = ui.edit(message, ui.username())
351 message = ui.edit(message, ui.username())
352 new = repo.commit(text=message, user=oldctx.user(), date=oldctx.date(),
352 new = repo.commit(text=message, user=oldctx.user(), date=oldctx.date(),
353 extra=oldctx.extra())
353 extra=oldctx.extra())
354 newctx = repo[new]
354 newctx = repo[new]
355 if oldctx.node() != newctx.node():
355 if oldctx.node() != newctx.node():
356 return newctx, [new], [oldctx.node()], []
356 return newctx, [new], [oldctx.node()], []
357 # We didn't make an edit, so just indicate no replaced nodes
357 # We didn't make an edit, so just indicate no replaced nodes
358 return newctx, [new], [], []
358 return newctx, [new], [], []
359
359
360 actiontable = {'p': pick,
360 actiontable = {'p': pick,
361 'pick': pick,
361 'pick': pick,
362 'e': edit,
362 'e': edit,
363 'edit': edit,
363 'edit': edit,
364 'f': fold,
364 'f': fold,
365 'fold': fold,
365 'fold': fold,
366 'd': drop,
366 'd': drop,
367 'drop': drop,
367 'drop': drop,
368 'm': message,
368 'm': message,
369 'mess': message,
369 'mess': message,
370 }
370 }
371
371
372 @command('histedit',
372 @command('histedit',
373 [('', 'commands', '',
373 [('', 'commands', '',
374 _('Read history edits from the specified file.')),
374 _('Read history edits from the specified file.')),
375 ('c', 'continue', False, _('continue an edit already in progress')),
375 ('c', 'continue', False, _('continue an edit already in progress')),
376 ('k', 'keep', False,
376 ('k', 'keep', False,
377 _("don't strip old nodes after edit is complete")),
377 _("don't strip old nodes after edit is complete")),
378 ('', 'abort', False, _('abort an edit in progress')),
378 ('', 'abort', False, _('abort an edit in progress')),
379 ('o', 'outgoing', False, _('changesets not found in destination')),
379 ('o', 'outgoing', False, _('changesets not found in destination')),
380 ('f', 'force', False,
380 ('f', 'force', False,
381 _('force outgoing even for unrelated repositories')),
381 _('force outgoing even for unrelated repositories')),
382 ('r', 'rev', [], _('first revision to be edited'))],
382 ('r', 'rev', [], _('first revision to be edited'))],
383 _("[PARENT]"))
383 _("[PARENT]"))
384 def histedit(ui, repo, *parent, **opts):
384 def histedit(ui, repo, *parent, **opts):
385 """interactively edit changeset history
385 """interactively edit changeset history
386 """
386 """
387 # TODO only abort if we try and histedit mq patches, not just
387 # TODO only abort if we try and histedit mq patches, not just
388 # blanket if mq patches are applied somewhere
388 # blanket if mq patches are applied somewhere
389 mq = getattr(repo, 'mq', None)
389 mq = getattr(repo, 'mq', None)
390 if mq and mq.applied:
390 if mq and mq.applied:
391 raise util.Abort(_('source has mq patches applied'))
391 raise util.Abort(_('source has mq patches applied'))
392
392
393 parent = list(parent) + opts.get('rev', [])
393 parent = list(parent) + opts.get('rev', [])
394 if opts.get('outgoing'):
394 if opts.get('outgoing'):
395 if len(parent) > 1:
395 if len(parent) > 1:
396 raise util.Abort(
396 raise util.Abort(
397 _('only one repo argument allowed with --outgoing'))
397 _('only one repo argument allowed with --outgoing'))
398 elif parent:
398 elif parent:
399 parent = parent[0]
399 parent = parent[0]
400
400
401 dest = ui.expandpath(parent or 'default-push', parent or 'default')
401 dest = ui.expandpath(parent or 'default-push', parent or 'default')
402 dest, revs = hg.parseurl(dest, None)[:2]
402 dest, revs = hg.parseurl(dest, None)[:2]
403 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
403 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
404
404
405 revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
405 revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
406 other = hg.peer(repo, opts, dest)
406 other = hg.peer(repo, opts, dest)
407
407
408 if revs:
408 if revs:
409 revs = [repo.lookup(rev) for rev in revs]
409 revs = [repo.lookup(rev) for rev in revs]
410
410
411 parent = discovery.findcommonoutgoing(
411 parent = discovery.findcommonoutgoing(
412 repo, other, [], force=opts.get('force')).missing[0:1]
412 repo, other, [], force=opts.get('force')).missing[0:1]
413 else:
413 else:
414 if opts.get('force'):
414 if opts.get('force'):
415 raise util.Abort(_('--force only allowed with --outgoing'))
415 raise util.Abort(_('--force only allowed with --outgoing'))
416
416
417 if opts.get('continue', False):
417 if opts.get('continue', False):
418 if len(parent) != 0:
418 if len(parent) != 0:
419 raise util.Abort(_('no arguments allowed with --continue'))
419 raise util.Abort(_('no arguments allowed with --continue'))
420 (parentctxnode, created, replaced, tmpnodes,
420 (parentctxnode, created, replaced, tmpnodes,
421 existing, rules, keep, topmost, replacemap) = readstate(repo)
421 existing, rules, keep, topmost, replacemap) = readstate(repo)
422 parentctx = repo[parentctxnode]
422 parentctx = repo[parentctxnode]
423 existing = set(existing)
423 existing = set(existing)
424 parentctx = bootstrapcontinue(ui, repo, parentctx, existing,
424 parentctx = bootstrapcontinue(ui, repo, parentctx, existing,
425 replacemap, rules, tmpnodes, created,
425 replacemap, rules, tmpnodes, created,
426 replaced, opts)
426 replaced, opts)
427 elif opts.get('abort', False):
427 elif opts.get('abort', False):
428 if len(parent) != 0:
428 if len(parent) != 0:
429 raise util.Abort(_('no arguments allowed with --abort'))
429 raise util.Abort(_('no arguments allowed with --abort'))
430 (parentctxnode, created, replaced, tmpnodes,
430 (parentctxnode, created, replaced, tmpnodes,
431 existing, rules, keep, topmost, replacemap) = readstate(repo)
431 existing, rules, keep, topmost, replacemap) = readstate(repo)
432 ui.debug('restore wc to old parent %s\n' % node.short(topmost))
432 ui.debug('restore wc to old parent %s\n' % node.short(topmost))
433 hg.clean(repo, topmost)
433 hg.clean(repo, topmost)
434 cleanupnode(ui, repo, 'created', created)
434 cleanupnode(ui, repo, 'created', created)
435 cleanupnode(ui, repo, 'temp', tmpnodes)
435 cleanupnode(ui, repo, 'temp', tmpnodes)
436 os.unlink(os.path.join(repo.path, 'histedit-state'))
436 os.unlink(os.path.join(repo.path, 'histedit-state'))
437 return
437 return
438 else:
438 else:
439 cmdutil.bailifchanged(repo)
439 cmdutil.bailifchanged(repo)
440 if os.path.exists(os.path.join(repo.path, 'histedit-state')):
440 if os.path.exists(os.path.join(repo.path, 'histedit-state')):
441 raise util.Abort(_('history edit already in progress, try '
441 raise util.Abort(_('history edit already in progress, try '
442 '--continue or --abort'))
442 '--continue or --abort'))
443
443
444 topmost, empty = repo.dirstate.parents()
444 topmost, empty = repo.dirstate.parents()
445
445
446
446
447 if len(parent) != 1:
447 if len(parent) != 1:
448 raise util.Abort(_('histedit requires exactly one parent revision'))
448 raise util.Abort(_('histedit requires exactly one parent revision'))
449 parent = scmutil.revsingle(repo, parent[0]).node()
449 parent = scmutil.revsingle(repo, parent[0]).node()
450
450
451 keep = opts.get('keep', False)
451 keep = opts.get('keep', False)
452 revs = between(repo, parent, topmost, keep)
452 revs = between(repo, parent, topmost, keep)
453
453
454 ctxs = [repo[r] for r in revs]
454 ctxs = [repo[r] for r in revs]
455 existing = [r.node() for r in ctxs]
455 existing = [r.node() for r in ctxs]
456 rules = opts.get('commands', '')
456 rules = opts.get('commands', '')
457 if not rules:
457 if not rules:
458 rules = '\n'.join([makedesc(c) for c in ctxs])
458 rules = '\n'.join([makedesc(c) for c in ctxs])
459 rules += '\n\n'
459 rules += '\n\n'
460 rules += editcomment % (node.short(parent), node.short(topmost))
460 rules += editcomment % (node.short(parent), node.short(topmost))
461 rules = ui.edit(rules, ui.username())
461 rules = ui.edit(rules, ui.username())
462 # Save edit rules in .hg/histedit-last-edit.txt in case
462 # Save edit rules in .hg/histedit-last-edit.txt in case
463 # the user needs to ask for help after something
463 # the user needs to ask for help after something
464 # surprising happens.
464 # surprising happens.
465 f = open(repo.join('histedit-last-edit.txt'), 'w')
465 f = open(repo.join('histedit-last-edit.txt'), 'w')
466 f.write(rules)
466 f.write(rules)
467 f.close()
467 f.close()
468 else:
468 else:
469 f = open(rules)
469 f = open(rules)
470 rules = f.read()
470 rules = f.read()
471 f.close()
471 f.close()
472 rules = [l for l in (r.strip() for r in rules.splitlines())
472 rules = [l for l in (r.strip() for r in rules.splitlines())
473 if l and not l[0] == '#']
473 if l and not l[0] == '#']
474 rules = verifyrules(rules, repo, ctxs)
474 rules = verifyrules(rules, repo, ctxs)
475
475
476 parentctx = repo[parent].parents()[0]
476 parentctx = repo[parent].parents()[0]
477 keep = opts.get('keep', False)
477 keep = opts.get('keep', False)
478 replaced = []
478 replaced = []
479 replacemap = {}
479 replacemap = {}
480 tmpnodes = []
480 tmpnodes = []
481 created = []
481 created = []
482
482
483
483
484 while rules:
484 while rules:
485 writestate(repo, parentctx.node(), created, replaced,
485 writestate(repo, parentctx.node(), created, replaced,
486 tmpnodes, existing, rules, keep, topmost, replacemap)
486 tmpnodes, existing, rules, keep, topmost, replacemap)
487 action, ha = rules.pop(0)
487 action, ha = rules.pop(0)
488 ui.debug('histedit: processing %s %s\n' % (action, ha))
488 ui.debug('histedit: processing %s %s\n' % (action, ha))
489 (parentctx, created_, replaced_, tmpnodes_) = actiontable[action](
489 (parentctx, created_, replaced_, tmpnodes_) = actiontable[action](
490 ui, repo, parentctx, ha, opts)
490 ui, repo, parentctx, ha, opts)
491
491
492 if replaced_:
492 if replaced_:
493 clen, rlen = len(created_), len(replaced_)
493 clen, rlen = len(created_), len(replaced_)
494 if clen == rlen == 1:
494 if clen == rlen == 1:
495 ui.debug('histedit: exact replacement of %s with %s\n' % (
495 ui.debug('histedit: exact replacement of %s with %s\n' % (
496 node.short(replaced_[0]), node.short(created_[0])))
496 node.short(replaced_[0]), node.short(created_[0])))
497
497
498 replacemap[replaced_[0]] = created_[0]
498 replacemap[replaced_[0]] = created_[0]
499 elif clen > rlen:
499 elif clen > rlen:
500 assert rlen == 1, ('unexpected replacement of '
500 assert rlen == 1, ('unexpected replacement of '
501 '%d changes with %d changes' % (rlen, clen))
501 '%d changes with %d changes' % (rlen, clen))
502 # made more changesets than we're replacing
502 # made more changesets than we're replacing
503 # TODO synthesize patch names for created patches
503 # TODO synthesize patch names for created patches
504 replacemap[replaced_[0]] = created_[-1]
504 replacemap[replaced_[0]] = created_[-1]
505 ui.debug('histedit: created many, assuming %s replaced by %s' %
505 ui.debug('histedit: created many, assuming %s replaced by %s' %
506 (node.short(replaced_[0]), node.short(created_[-1])))
506 (node.short(replaced_[0]), node.short(created_[-1])))
507 elif rlen > clen:
507 elif rlen > clen:
508 if not created_:
508 if not created_:
509 # This must be a drop. Try and put our metadata on
509 # This must be a drop. Try and put our metadata on
510 # the parent change.
510 # the parent change.
511 assert rlen == 1
511 assert rlen == 1
512 r = replaced_[0]
512 r = replaced_[0]
513 ui.debug('histedit: %s seems replaced with nothing, '
513 ui.debug('histedit: %s seems replaced with nothing, '
514 'finding a parent\n' % (node.short(r)))
514 'finding a parent\n' % (node.short(r)))
515 pctx = repo[r].parents()[0]
515 pctx = repo[r].parents()[0]
516 if pctx.node() in replacemap:
516 if pctx.node() in replacemap:
517 ui.debug('histedit: parent is already replaced\n')
517 ui.debug('histedit: parent is already replaced\n')
518 replacemap[r] = replacemap[pctx.node()]
518 replacemap[r] = replacemap[pctx.node()]
519 else:
519 else:
520 replacemap[r] = pctx.node()
520 replacemap[r] = pctx.node()
521 ui.debug('histedit: %s best replaced by %s\n' % (
521 ui.debug('histedit: %s best replaced by %s\n' % (
522 node.short(r), node.short(replacemap[r])))
522 node.short(r), node.short(replacemap[r])))
523 else:
523 else:
524 assert len(created_) == 1
524 assert len(created_) == 1
525 for r in replaced_:
525 for r in replaced_:
526 ui.debug('histedit: %s replaced by %s\n' % (
526 ui.debug('histedit: %s replaced by %s\n' % (
527 node.short(r), node.short(created_[0])))
527 node.short(r), node.short(created_[0])))
528 replacemap[r] = created_[0]
528 replacemap[r] = created_[0]
529 else:
529 else:
530 assert False, (
530 assert False, (
531 'Unhandled case in replacement mapping! '
531 'Unhandled case in replacement mapping! '
532 'replacing %d changes with %d changes' % (rlen, clen))
532 'replacing %d changes with %d changes' % (rlen, clen))
533 created.extend(created_)
533 created.extend(created_)
534 replaced.extend(replaced_)
534 replaced.extend(replaced_)
535 tmpnodes.extend(tmpnodes_)
535 tmpnodes.extend(tmpnodes_)
536
536
537 hg.update(repo, parentctx.node())
537 hg.update(repo, parentctx.node())
538
538
539 if not keep:
539 if not keep:
540 if replacemap:
540 if replacemap:
541 movebookmarks(ui, repo, replacemap, tmpnodes, created)
541 movebookmarks(ui, repo, replacemap, tmpnodes, created)
542 # TODO update mq state
542 # TODO update mq state
543 cleanupnode(ui, repo, 'replaced', replaced)
543 cleanupnode(ui, repo, 'replaced', replaced)
544
544
545 cleanupnode(ui, repo, 'temp', tmpnodes)
545 cleanupnode(ui, repo, 'temp', tmpnodes)
546 os.unlink(os.path.join(repo.path, 'histedit-state'))
546 os.unlink(os.path.join(repo.path, 'histedit-state'))
547 if os.path.exists(repo.sjoin('undo')):
547 if os.path.exists(repo.sjoin('undo')):
548 os.unlink(repo.sjoin('undo'))
548 os.unlink(repo.sjoin('undo'))
549
549
550
550
def bootstrapcontinue(ui, repo, parentctx, existing, replacemap, rules,
                      tmpnodes, created, replaced, opts):
    """Resume an interrupted histedit session (--continue).

    Commits any pending working-directory changes left by the
    interrupted action, finishes a pending fold if one was in progress,
    and returns the changectx to use as parent for the remaining rules.

    The tmpnodes, created and replaced lists and the replacemap dict are
    updated in place; rules has its first entry popped.
    """
    currentparent, wantnull = repo.dirstate.parents()
    # existing is the list of revisions initially considered by
    # histedit. Here we use it to list new changesets, descendants
    # of parentctx without an 'existing' changeset in-between. We
    # also have to exclude 'existing' changesets which were
    # previously dropped.
    descendants = set(c.node() for c in
                      repo.set('(%d::) - %d', parentctx, parentctx))
    notdropped = set(n for n in existing if n in descendants and
                     (n not in replacemap or replacemap[n] in descendants))
    # Discover any nodes the user has added in the interim. We can
    # miss changesets which were dropped and recreated the same.
    newchildren = list(c.node() for c in repo.set(
        'sort(%ln - (%ln or %ln::))', descendants, existing, notdropped))
    action, currentnode = rules.pop(0)
    if action in ('f', 'fold'):
        # new revisions will be squashed away, so track them as temporary
        tmpnodes.extend(newchildren)
    else:
        created.extend(newchildren)

    m, a, r, d = repo.status()[:4]
    oldctx = repo[currentnode]
    message = oldctx.description() + '\n'
    if action in ('e', 'edit', 'm', 'mess'):
        message = ui.edit(message, ui.username())
    elif action in ('f', 'fold'):
        message = 'fold-temp-revision %s' % currentnode
    new = None
    # only commit when the working directory actually changed
    if m or a or r or d:
        new = repo.commit(text=message, user=oldctx.user(),
                          date=oldctx.date(), extra=oldctx.extra())

    # If we're resuming a fold and we have new changes, mark the
    # replacements and finish the fold. If not, it's more like a
    # drop of the changesets that disappeared, and we can skip
    # this step.
    if action in ('f', 'fold') and (new or newchildren):
        if new:
            tmpnodes.append(new)
        else:
            new = newchildren[-1]
        (parentctx, created_, replaced_, tmpnodes_) = finishfold(
            ui, repo, parentctx, oldctx, new, opts, newchildren)
        replaced.extend(replaced_)
        created.extend(created_)
        tmpnodes.extend(tmpnodes_)
    elif action not in ('d', 'drop'):
        if new != oldctx.node():
            replaced.append(oldctx.node())
        if new:
            if new != oldctx.node():
                created.append(new)
            parentctx = repo[new]
    return parentctx
607
607
608
608
def between(repo, old, new, keep):
    """Select and validate the linear run of revisions from old to new.

    Follows first-child links starting at ``old`` and collects each
    revision reached; stops when ``new`` is reached or the chain ends.
    When keep is false, the specified set can't have children (aborts
    on any branching point and on merges)."""
    revs = [old]
    cursor = old
    while cursor != new:
        ctx = repo[cursor]
        children = ctx.children()
        if not keep and len(children) > 1:
            raise util.Abort(_('cannot edit history that would orphan nodes'))
        parents = ctx.parents()
        if len(parents) != 1 and parents[1] != node.nullid:
            raise util.Abort(_("can't edit history with merges"))
        if children:
            cursor = children[0].node()
            revs.append(cursor)
        else:
            # chain ended before reaching `new`; stop the walk
            cursor = new
    if len(repo[cursor].children()) and not keep:
        raise util.Abort(_('cannot edit history that would orphan nodes'))
    return revs
629
629
630
630
def writestate(repo, parentctxnode, created, replaced,
               tmpnodes, existing, rules, keep, topmost, replacemap):
    """Persist the histedit session state to .hg/histedit-state.

    The state is a pickled tuple mirroring what readstate() returns.
    """
    state = (parentctxnode, created, replaced, tmpnodes,
             existing, rules, keep, topmost, replacemap)
    statefile = open(os.path.join(repo.path, 'histedit-state'), 'w')
    pickle.dump(state, statefile)
    statefile.close()
638
638
def readstate(repo):
    """Returns a tuple of (parentnode, created, replaced, tmp, existing, rules,
    keep, topmost, replacemap ).

    Raises IOError when no histedit-state file exists.
    """
    fp = open(os.path.join(repo.path, 'histedit-state'))
    try:
        return pickle.load(fp)
    finally:
        # close explicitly instead of leaking the descriptor to GC
        fp.close()
645
645
646
646
def makedesc(c):
    """Build an initial action line for a ctx `c`.

    Lines are in the form:

    pick <hash> <rev> <summary>
    """
    desc = c.description()
    summary = ''
    if desc:
        summary = desc.splitlines()[0]
    action = 'pick %s %d %s' % (c, c.rev(), summary)
    # trim to 80 chars so it's not stupidly wide in my editor
    return action[:80]
659
659
def verifyrules(rules, repo, ctxs):
    """Verify that there exists exactly one edit rule per given changeset.

    Will abort if there are too many or too few rules, a malformed rule,
    or a rule on a changeset outside of the user-given range.

    Returns the parsed rules as a list of [action, hash] pairs.
    """
    parsed = []
    if len(rules) != len(ctxs):
        raise util.Abort(_('must specify a rule for each changeset once'))
    for r in rules:
        if ' ' not in r:
            raise util.Abort(_('malformed line "%s"') % r)
        action, rest = r.split(' ', 1)
        if ' ' in rest.strip():
            ha, rest = rest.split(' ', 1)
        else:
            # Use the remainder of the line (not the whole line) as the
            # hash, so that rules without a trailing summary, such as
            # "drop 0123abcd", parse correctly instead of aborting with
            # "unknown changeset" on 'drop 0123abcd' as a hash.
            ha = rest.strip()
        try:
            if repo[ha] not in ctxs:
                raise util.Abort(
                    _('may not use changesets other than the ones listed'))
        except error.RepoError:
            raise util.Abort(_('unknown changeset %s listed') % ha)
        if action not in actiontable:
            raise util.Abort(_('unknown action "%s"') % action)
        parsed.append([action, ha])
    return parsed
687
687
def movebookmarks(ui, repo, replacemap, tmpnodes, created):
    """Move bookmark from old to newly created node.

    NOTE(review): the caller only invokes this when replacemap is
    non-empty (see the histedit cleanup path) — the [-1] tip lookups
    below rely on that.
    """
    ui.note(_('histedit: Should update metadata for the following '
              'changes:\n'))

    def copybms(old, new):
        # Move every bookmark on `old` onto the final successor of `new`.
        if old in tmpnodes or old in created:
            # can't have any metadata we'd want to update
            return
        # follow chained replacements to the ultimate successor
        while new in replacemap:
            new = replacemap[new]
        ui.note(_('histedit: %s to %s\n') % (node.short(old),
                                             node.short(new)))
        octx = repo[old]
        marks = octx.bookmarks()
        if marks:
            ui.note(_('histedit: moving bookmarks %s\n') %
                    ', '.join(marks))
            for mark in marks:
                repo._bookmarks[mark] = new
            bookmarks.write(repo)

    # We assume that bookmarks on the tip should remain
    # tipmost, but bookmarks on non-tip changesets should go
    # to their most reasonable successor. As a result, find
    # the old tip and new tip and copy those bookmarks first,
    # then do the rest of the bookmark copies.
    oldtip = sorted(replacemap.keys(), key=repo.changelog.rev)[-1]
    newtip = sorted(replacemap.values(), key=repo.changelog.rev)[-1]
    copybms(oldtip, newtip)

    for old, new in sorted(replacemap.iteritems()):
        copybms(old, new)
721
721
def cleanupnode(ui, repo, name, nodes):
    """Strip a group of nodes from the repository.

    The set of nodes to strip may contain nodes unknown to the
    repository; those are silently skipped."""
    shortnames = [node.short(n) for n in nodes]
    ui.debug('should strip %s nodes %s\n' % (name, ', '.join(shortnames)))
    lock = None
    try:
        lock = repo.lock()
        # keep only nodes the changelog actually knows about, silently
        # ignoring unknown items
        nodemap = repo.changelog.nodemap
        known = [n for n in nodes if n in nodemap]
        # stripping each root removes its whole descendant group
        roots = [c.node() for c in repo.set("roots(%ln)", known)]
        for root in roots:
            # Processing in reverse (tip-first) order would reduce bundle
            # overhead, but currently triggers a bug in the changegroup
            # hook, so strip root-first for now.
            repair.strip(ui, repo, root)
    finally:
        lockmod.release(lock)
743
743
@@ -1,284 +1,284
1 # Mercurial bookmark support code
1 # Mercurial bookmark support code
2 #
2 #
3 # Copyright 2008 David Soria Parra <dsp@php.net>
3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from mercurial.i18n import _
8 from mercurial.i18n import _
9 from mercurial.node import hex
9 from mercurial.node import hex
10 from mercurial import encoding, error, util, obsolete, phases
10 from mercurial import encoding, error, util, obsolete, phases
11 import errno, os
11 import errno, os
12
12
def valid(mark):
    """Report whether `mark` is a legal bookmark name.

    Bookmark names may not contain ':', NUL, LF, or CR."""
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in mark:
            return False
    return True
18
18
19 def read(repo):
19 def read(repo):
20 '''Parse .hg/bookmarks file and return a dictionary
20 '''Parse .hg/bookmarks file and return a dictionary
21
21
22 Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
22 Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
23 in the .hg/bookmarks file.
23 in the .hg/bookmarks file.
24 Read the file and return a (name=>nodeid) dictionary
24 Read the file and return a (name=>nodeid) dictionary
25 '''
25 '''
26 bookmarks = {}
26 bookmarks = {}
27 try:
27 try:
28 for line in repo.opener('bookmarks'):
28 for line in repo.opener('bookmarks'):
29 line = line.strip()
29 line = line.strip()
30 if not line:
30 if not line:
31 continue
31 continue
32 if ' ' not in line:
32 if ' ' not in line:
33 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
33 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
34 continue
34 continue
35 sha, refspec = line.split(' ', 1)
35 sha, refspec = line.split(' ', 1)
36 refspec = encoding.tolocal(refspec)
36 refspec = encoding.tolocal(refspec)
37 try:
37 try:
38 bookmarks[refspec] = repo.changelog.lookup(sha)
38 bookmarks[refspec] = repo.changelog.lookup(sha)
39 except LookupError:
39 except LookupError:
40 pass
40 pass
41 except IOError, inst:
41 except IOError, inst:
42 if inst.errno != errno.ENOENT:
42 if inst.errno != errno.ENOENT:
43 raise
43 raise
44 return bookmarks
44 return bookmarks
45
45
46 def readcurrent(repo):
46 def readcurrent(repo):
47 '''Get the current bookmark
47 '''Get the current bookmark
48
48
49 If we use gittishsh branches we have a current bookmark that
49 If we use gittishsh branches we have a current bookmark that
50 we are on. This function returns the name of the bookmark. It
50 we are on. This function returns the name of the bookmark. It
51 is stored in .hg/bookmarks.current
51 is stored in .hg/bookmarks.current
52 '''
52 '''
53 mark = None
53 mark = None
54 try:
54 try:
55 file = repo.opener('bookmarks.current')
55 file = repo.opener('bookmarks.current')
56 except IOError, inst:
56 except IOError, inst:
57 if inst.errno != errno.ENOENT:
57 if inst.errno != errno.ENOENT:
58 raise
58 raise
59 return None
59 return None
60 try:
60 try:
61 # No readline() in osutil.posixfile, reading everything is cheap
61 # No readline() in osutil.posixfile, reading everything is cheap
62 mark = encoding.tolocal((file.readlines() or [''])[0])
62 mark = encoding.tolocal((file.readlines() or [''])[0])
63 if mark == '' or mark not in repo._bookmarks:
63 if mark == '' or mark not in repo._bookmarks:
64 mark = None
64 mark = None
65 finally:
65 finally:
66 file.close()
66 file.close()
67 return mark
67 return mark
68
68
def write(repo):
    '''Write bookmarks

    Write the given bookmark => hash dictionary to the .hg/bookmarks file
    in a format equal to those of localtags.

    We also store a backup of the previous state in undo.bookmarks that
    can be copied back on rollback.
    '''
    refs = repo._bookmarks

    if repo._bookmarkcurrent not in refs:
        setcurrent(repo, None)
    for mark in refs.keys():
        if not valid(mark):
            # apply % outside _() so gettext looks up the untranslated
            # msgid (formatting first would defeat the translation)
            raise util.Abort(_("bookmark '%s' contains illegal "
                               "character") % mark)

    wlock = repo.wlock()
    try:

        file = repo.opener('bookmarks', 'w', atomictemp=True)
        for refspec, node in refs.iteritems():
            file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
        file.close()

        # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
        try:
            os.utime(repo.sjoin('00changelog.i'), None)
        except OSError:
            pass

    finally:
        wlock.release()
103
103
def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on

    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current
    '''
    current = repo._bookmarkcurrent
    if current == mark:
        return

    if mark not in repo._bookmarks:
        mark = ''
    if not valid(mark):
        # apply % outside _() so gettext looks up the untranslated
        # msgid (formatting first would defeat the translation)
        raise util.Abort(_("bookmark '%s' contains illegal "
                           "character") % mark)

    wlock = repo.wlock()
    try:
        file = repo.opener('bookmarks.current', 'w', atomictemp=True)
        file.write(encoding.fromlocal(mark))
        file.close()
    finally:
        wlock.release()
    repo._bookmarkcurrent = mark
128
128
129 def unsetcurrent(repo):
129 def unsetcurrent(repo):
130 wlock = repo.wlock()
130 wlock = repo.wlock()
131 try:
131 try:
132 try:
132 try:
133 util.unlink(repo.join('bookmarks.current'))
133 util.unlink(repo.join('bookmarks.current'))
134 repo._bookmarkcurrent = None
134 repo._bookmarkcurrent = None
135 except OSError, inst:
135 except OSError, inst:
136 if inst.errno != errno.ENOENT:
136 if inst.errno != errno.ENOENT:
137 raise
137 raise
138 finally:
138 finally:
139 wlock.release()
139 wlock.release()
140
140
def updatecurrentbookmark(repo, oldnode, curbranch):
    """Advance the current bookmark from oldnode toward curbranch's tip.

    Falls back to "tip" when the default branch does not exist; aborts
    when a non-default branch is missing."""
    try:
        return update(repo, oldnode, repo.branchtip(curbranch))
    except error.RepoLookupError:
        if curbranch == "default": # no default branch!
            return update(repo, oldnode, repo.lookup("tip"))
        raise util.Abort(_("branch %s not found") % curbranch)
149
149
def update(repo, parents, node):
    """Advance the current bookmark and prune its divergent variants.

    Returns True when the bookmark store was rewritten."""
    marks = repo._bookmarks
    changed = False
    cur = repo._bookmarkcurrent
    if not cur:
        return False

    basename = cur.split('@', 1)[0]
    # the current bookmark plus any divergent variants (name@suffix)
    candidates = [b for b in marks if b.split('@', 1)[0] == basename]
    for mark in candidates:
        if not (mark and marks[mark] in parents):
            continue
        old = repo[marks[mark]]
        new = repo[node]
        if old.descendant(new) and mark == cur:
            marks[cur] = new.node()
            changed = True
        if mark != cur:
            del marks[mark]
    if changed:
        repo._writebookmarks(marks)
    return changed
170
170
def listbookmarks(repo):
    # Return a {name: hex node} dict of the bookmarks worth exposing
    # over the wire (pushkey namespace).
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    marks = getattr(repo, '_bookmarks', {})

    d = {}
    for k, v in marks.iteritems():
        # don't expose local divergent bookmarks
        if '@' not in k or k.endswith('@'):
            d[k] = hex(v)
    return d
182
182
def pushbookmark(repo, key, old, new):
    # pushkey handler: set bookmark `key` from hex value `old` to hex
    # value `new`; an empty `new` deletes the bookmark.  Returns False
    # when the current value does not match `old` or `new` is unknown
    # locally, True on success.
    w = repo.wlock()
    try:
        marks = repo._bookmarks
        if hex(marks.get(key, '')) != old:
            # precondition failed: someone changed the bookmark under us
            return False
        if new == '':
            del marks[key]
        else:
            if new not in repo:
                return False
            marks[key] = repo[new].node()
        write(repo)
        return True
    finally:
        w.release()
199
199
def updatefromremote(ui, repo, remote, path):
    # Merge the remote's bookmarks into the local repo (after a pull).
    # Fast-forwardable bookmarks are updated in place; divergent ones
    # are stored under a 'name@suffix' alias; unknown remote bookmarks
    # pointing at changesets we already have are simply added.
    ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    changed = False
    for k in rb.keys():
        if k in repo._bookmarks:
            nr, nl = rb[k], repo._bookmarks[k]
            if nr in repo:
                cr = repo[nr]
                cl = repo[nl]
                if cl.rev() >= cr.rev():
                    # local side is not behind the remote: nothing to do
                    continue
                if validdest(repo, cl, cr):
                    repo._bookmarks[k] = cr.node()
                    changed = True
                    ui.status(_("updating bookmark %s\n") % k)
                else:
                    # find a unique @ suffix
                    for x in range(1, 100):
                        n = '%s@%d' % (k, x)
                        if n not in repo._bookmarks:
                            break
                    # try to use an @pathalias suffix
                    # if an @pathalias already exists, we overwrite (update) it
                    for p, u in ui.configitems("paths"):
                        if path == u:
                            n = '%s@%s' % (k, p)

                    repo._bookmarks[n] = cr.node()
                    changed = True
                    ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n))
        elif rb[k] in repo:
            # add remote bookmarks for changes we already have
            repo._bookmarks[k] = repo[rb[k]].node()
            changed = True
            ui.status(_("adding remote bookmark %s\n") % k)

    if changed:
        write(repo)
239
239
def diff(ui, dst, src):
    '''Show bookmarks present in src but absent from dst.

    Writes one " name node" line per missing bookmark and returns a
    command-style exit code: 0 if any were found, 1 otherwise.
    '''
    ui.status(_("searching for changed bookmarks\n"))

    smarks = src.listkeys('bookmarks')
    dmarks = dst.listkeys('bookmarks')

    # bookmarks the source has that the destination lacks entirely
    # (renamed from `diff`: the old local shadowed this function's name)
    missing = sorted(set(smarks) - set(dmarks))
    for k in missing:
        # full node in debug mode, short (12-char) form otherwise
        mark = ui.debugflag and smarks[k] or smarks[k][:12]
        ui.write(" %-25s %s\n" % (k, mark))

    if not missing:
        ui.status(_("no changed bookmarks found\n"))
        return 1
    return 0
255
255
def validdest(repo, old, new):
    """Is the new bookmark destination a valid update from the old one"""
    if old == new:
        # Old == new -> nothing to update.
        return False
    elif not old:
        # old is nullrev, anything is valid.
        # (new != nullrev has been excluded by the previous check)
        return True
    elif repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.

        validdests = set([old])
        plen = -1
        # compute the whole set of successors or descendants: iterate
        # until the set stops growing (fixed point)
        while len(validdests) != plen:
            plen = len(validdests)
            succs = set(c.node() for c in validdests)
            for c in validdests:
                if c.phase() > phases.public:
                    # obsolescence marker does not apply to public changeset
                    succs.update(obsolete.anysuccessors(repo.obsstore,
                                                        c.node()))
            validdests = set(repo.set('%ln::', succs))
        # old itself is never a valid destination
        validdests.remove(old)
        return new in validdests
    else:
        # no obsolescence: a plain descendant check suffices
        return old.descendant(new)
@@ -1,508 +1,508
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import osutil, scmutil, util, parsers
9 import osutil, scmutil, util, parsers
10 import os, stat, errno
10 import os, stat, errno
11
11
12 _sha = util.sha1
12 _sha = util.sha1
13
13
14 # This avoids a collision between a file named foo and a dir named
14 # This avoids a collision between a file named foo and a dir named
15 # foo.i or foo.d
15 # foo.i or foo.d
16 def _encodedir(path):
16 def _encodedir(path):
17 '''
17 '''
18 >>> _encodedir('data/foo.i')
18 >>> _encodedir('data/foo.i')
19 'data/foo.i'
19 'data/foo.i'
20 >>> _encodedir('data/foo.i/bla.i')
20 >>> _encodedir('data/foo.i/bla.i')
21 'data/foo.i.hg/bla.i'
21 'data/foo.i.hg/bla.i'
22 >>> _encodedir('data/foo.i.hg/bla.i')
22 >>> _encodedir('data/foo.i.hg/bla.i')
23 'data/foo.i.hg.hg/bla.i'
23 'data/foo.i.hg.hg/bla.i'
24 >>> _encodedir('data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
24 >>> _encodedir('data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
25 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
25 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
26 '''
26 '''
27 return (path
27 return (path
28 .replace(".hg/", ".hg.hg/")
28 .replace(".hg/", ".hg.hg/")
29 .replace(".i/", ".i.hg/")
29 .replace(".i/", ".i.hg/")
30 .replace(".d/", ".d.hg/"))
30 .replace(".d/", ".d.hg/"))
31
31
# prefer the encodedir provided by the parsers extension module when it
# has one; otherwise use the pure Python implementation above
encodedir = getattr(parsers, 'encodedir', _encodedir)
33
33
def decodedir(path):
    '''
    >>> decodedir('data/foo.i')
    'data/foo.i'
    >>> decodedir('data/foo.i.hg/bla.i')
    'data/foo.i/bla.i'
    >>> decodedir('data/foo.i.hg.hg/bla.i')
    'data/foo.i.hg/bla.i'
    '''
    # inverse of encodedir: strip the '.hg' armour from directory names
    if ".hg/" not in path:
        # fast path: nothing was encoded
        return path
    res = path.replace(".d.hg/", ".d/")
    res = res.replace(".i.hg/", ".i/")
    res = res.replace(".hg.hg/", ".hg/")
    return res
49
49
50 def _buildencodefun():
50 def _buildencodefun():
51 '''
51 '''
52 >>> enc, dec = _buildencodefun()
52 >>> enc, dec = _buildencodefun()
53
53
54 >>> enc('nothing/special.txt')
54 >>> enc('nothing/special.txt')
55 'nothing/special.txt'
55 'nothing/special.txt'
56 >>> dec('nothing/special.txt')
56 >>> dec('nothing/special.txt')
57 'nothing/special.txt'
57 'nothing/special.txt'
58
58
59 >>> enc('HELLO')
59 >>> enc('HELLO')
60 '_h_e_l_l_o'
60 '_h_e_l_l_o'
61 >>> dec('_h_e_l_l_o')
61 >>> dec('_h_e_l_l_o')
62 'HELLO'
62 'HELLO'
63
63
64 >>> enc('hello:world?')
64 >>> enc('hello:world?')
65 'hello~3aworld~3f'
65 'hello~3aworld~3f'
66 >>> dec('hello~3aworld~3f')
66 >>> dec('hello~3aworld~3f')
67 'hello:world?'
67 'hello:world?'
68
68
69 >>> enc('the\x07quick\xADshot')
69 >>> enc('the\x07quick\xADshot')
70 'the~07quick~adshot'
70 'the~07quick~adshot'
71 >>> dec('the~07quick~adshot')
71 >>> dec('the~07quick~adshot')
72 'the\\x07quick\\xadshot'
72 'the\\x07quick\\xadshot'
73 '''
73 '''
74 e = '_'
74 e = '_'
75 winreserved = [ord(x) for x in '\\:*?"<>|']
75 winreserved = [ord(x) for x in '\\:*?"<>|']
76 cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
76 cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
77 for x in (range(32) + range(126, 256) + winreserved):
77 for x in (range(32) + range(126, 256) + winreserved):
78 cmap[chr(x)] = "~%02x" % x
78 cmap[chr(x)] = "~%02x" % x
79 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
79 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
80 cmap[chr(x)] = e + chr(x).lower()
80 cmap[chr(x)] = e + chr(x).lower()
81 dmap = {}
81 dmap = {}
82 for k, v in cmap.iteritems():
82 for k, v in cmap.iteritems():
83 dmap[v] = k
83 dmap[v] = k
84 def decode(s):
84 def decode(s):
85 i = 0
85 i = 0
86 while i < len(s):
86 while i < len(s):
87 for l in xrange(1, 4):
87 for l in xrange(1, 4):
88 try:
88 try:
89 yield dmap[s[i:i + l]]
89 yield dmap[s[i:i + l]]
90 i += l
90 i += l
91 break
91 break
92 except KeyError:
92 except KeyError:
93 pass
93 pass
94 else:
94 else:
95 raise KeyError
95 raise KeyError
96 return (lambda s: ''.join([cmap[c] for c in s]),
96 return (lambda s: ''.join([cmap[c] for c in s]),
97 lambda s: ''.join(list(decode(s))))
97 lambda s: ''.join(list(decode(s))))
98
98
99 _encodefname, _decodefname = _buildencodefun()
99 _encodefname, _decodefname = _buildencodefun()
100
100
def encodefilename(s):
    '''
    >>> encodefilename('foo.i/bar.d/bla.hg/hi:world?/HELLO')
    'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
    '''
    # apply the directory-collision encoding first, then the
    # character-level filename encoding
    return _encodefname(encodedir(s))
107
107
def decodefilename(s):
    '''
    >>> decodefilename('foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
    'foo.i/bar.d/bla.hg/hi:world?/HELLO'
    '''
    # inverse of encodefilename: undo the character encoding, then the
    # directory-collision encoding
    return decodedir(_decodefname(s))
114
114
def _buildlowerencodefun():
    '''
    >>> f = _buildlowerencodefun()
    >>> f('nothing/special.txt')
    'nothing/special.txt'
    >>> f('HELLO')
    'hello'
    >>> f('hello:world?')
    'hello~3aworld~3f'
    >>> f('the\x07quick\xADshot')
    'the~07quick~adshot'
    '''
    winreserved = [ord(c) for c in '\\:*?"<>|']
    # identity mapping for 7-bit characters ...
    cmap = dict([(chr(code), chr(code)) for code in range(127)])
    # ... '~xx' hex escapes for control/8-bit/Windows-reserved chars ...
    for code in list(range(32)) + list(range(126, 256)) + winreserved:
        cmap[chr(code)] = "~%02x" % code
    # ... and plain lowercasing of uppercase ASCII (not reversible,
    # unlike _buildencodefun's '_x' scheme)
    for code in range(ord("A"), ord("Z") + 1):
        cmap[chr(code)] = chr(code).lower()
    return lambda s: "".join([cmap[c] for c in s])

# single shared instance of the lowercasing encoder
lowerencode = _buildlowerencodefun()
136
136
137 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
137 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
138 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
138 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
139 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
139 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
140 def _auxencode(path, dotencode):
140 def _auxencode(path, dotencode):
141 '''
141 '''
142 Encodes filenames containing names reserved by Windows or which end in
142 Encodes filenames containing names reserved by Windows or which end in
143 period or space. Does not touch other single reserved characters c.
143 period or space. Does not touch other single reserved characters c.
144 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
144 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
145 Additionally encodes space or period at the beginning, if dotencode is
145 Additionally encodes space or period at the beginning, if dotencode is
146 True. Parameter path is assumed to be all lowercase.
146 True. Parameter path is assumed to be all lowercase.
147 A segment only needs encoding if a reserved name appears as a
147 A segment only needs encoding if a reserved name appears as a
148 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
148 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
149 doesn't need encoding.
149 doesn't need encoding.
150
150
151 >>> s = '.foo/aux.txt/txt.aux/con/prn/nul/foo.'
151 >>> s = '.foo/aux.txt/txt.aux/con/prn/nul/foo.'
152 >>> _auxencode(s.split('/'), True)
152 >>> _auxencode(s.split('/'), True)
153 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
153 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
154 >>> s = '.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
154 >>> s = '.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
155 >>> _auxencode(s.split('/'), False)
155 >>> _auxencode(s.split('/'), False)
156 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
156 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
157 >>> _auxencode(['foo. '], True)
157 >>> _auxencode(['foo. '], True)
158 ['foo.~20']
158 ['foo.~20']
159 >>> _auxencode([' .foo'], True)
159 >>> _auxencode([' .foo'], True)
160 ['~20.foo']
160 ['~20.foo']
161 '''
161 '''
162 for i, n in enumerate(path):
162 for i, n in enumerate(path):
163 if not n:
163 if not n:
164 continue
164 continue
165 if dotencode and n[0] in '. ':
165 if dotencode and n[0] in '. ':
166 n = "~%02x" % ord(n[0]) + n[1:]
166 n = "~%02x" % ord(n[0]) + n[1:]
167 path[i] = n
167 path[i] = n
168 else:
168 else:
169 l = n.find('.')
169 l = n.find('.')
170 if l == -1:
170 if l == -1:
171 l = len(n)
171 l = len(n)
172 if ((l == 3 and n[:3] in _winres3) or
172 if ((l == 3 and n[:3] in _winres3) or
173 (l == 4 and n[3] <= '9' and n[3] >= '1'
173 (l == 4 and n[3] <= '9' and n[3] >= '1'
174 and n[:3] in _winres4)):
174 and n[:3] in _winres4)):
175 # encode third letter ('aux' -> 'au~78')
175 # encode third letter ('aux' -> 'au~78')
176 ec = "~%02x" % ord(n[2])
176 ec = "~%02x" % ord(n[2])
177 n = n[0:2] + ec + n[3:]
177 n = n[0:2] + ec + n[3:]
178 path[i] = n
178 path[i] = n
179 if n[-1] in '. ':
179 if n[-1] in '. ':
180 # encode last period or space ('foo...' -> 'foo..~2e')
180 # encode last period or space ('foo...' -> 'foo..~2e')
181 path[i] = n[:-1] + "~%02x" % ord(n[-1])
181 path[i] = n[:-1] + "~%02x" % ord(n[-1])
182 return path
182 return path
183
183
# longest encoded store path we produce before _hybridencode falls back
# to the hashed encoding (see _hashencode)
_maxstorepathlen = 120
# number of leading characters kept from each directory level in
# hashed encoding
_dirprefixlen = 8
# total budget for the shortened directory part of a hashed path
_maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
187
187
def _hashencode(path, dotencode):
    # Non-reversible encoding for over-long paths: keep up to
    # _dirprefixlen chars of each directory level (capped at
    # _maxshortdirslen in total), then as much of the basename as fits,
    # then the sha1 digest of the full path, preserving the extension.
    digest = _sha(path).hexdigest()
    le = lowerencode(path).split('/')[1:]  # drop the leading component
    parts = _auxencode(le, dotencode)
    basename = parts[-1]
    _root, ext = os.path.splitext(basename)
    sdirs = []
    sdirslen = 0
    for p in parts[:-1]:
        d = p[:_dirprefixlen]
        if d[-1] in '. ':
            # Windows can't access dirs ending in period or space
            d = d[:-1] + '_'
        if sdirslen == 0:
            t = len(d)
        else:
            t = sdirslen + 1 + len(d)  # +1 accounts for the '/'
        if t > _maxshortdirslen:
            # directory budget exhausted: stop adding levels
            break
        sdirs.append(d)
        sdirslen = t
    dirs = '/'.join(sdirs)
    if len(dirs) > 0:
        dirs += '/'
    res = 'dh/' + dirs + digest + ext
    spaceleft = _maxstorepathlen - len(res)
    if spaceleft > 0:
        # pad with the start of the basename up to the length limit
        filler = basename[:spaceleft]
        res = 'dh/' + dirs + filler + digest + ext
    return res
218
218
def _hybridencode(path, dotencode):
    '''encodes path with a length limit

    Encodes all paths that begin with 'data/', according to the following.

    Default encoding (reversible):

    Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
    characters are encoded as '~xx', where xx is the two digit hex code
    of the character (see encodefilename).
    Relevant path components consisting of Windows reserved filenames are
    masked by encoding the third character ('aux' -> 'au~78', see _auxencode).

    Hashed encoding (not reversible):

    If the default-encoded path is longer than _maxstorepathlen, a
    non-reversible hybrid hashing of the path is done instead.
    This encoding uses up to _dirprefixlen characters of all directory
    levels of the lowerencoded path, but not more levels than can fit into
    _maxshortdirslen.
    Then follows the filler followed by the sha digest of the full path.
    The filler is the beginning of the basename of the lowerencoded path
    (the basename is everything after the last path separator). The filler
    is as long as possible, filling in characters from the basename until
    the encoded path has _maxstorepathlen characters (or all chars of the
    basename have been taken).
    The extension (e.g. '.i' or '.d') is preserved.

    The string 'data/' at the beginning is replaced with 'dh/', if the hashed
    encoding was used.
    '''
    path = encodedir(path)
    ef = _encodefname(path).split('/')
    res = '/'.join(_auxencode(ef, dotencode))
    if len(res) > _maxstorepathlen:
        # too long for the reversible scheme: hash it
        res = _hashencode(path, dotencode)
    return res
256
256
def _pathencode(path):
    # Pure Python default (reversible) path encoder.  Returns None when
    # the input or the encoded result exceeds _maxstorepathlen, in
    # which case the caller falls back to _hashencode (see
    # _dothybridencode).
    if len(path) > _maxstorepathlen:
        return None
    ef = _encodefname(encodedir(path)).split('/')
    res = '/'.join(_auxencode(ef, True))
    if len(res) > _maxstorepathlen:
        return None
    return res
265
265
# prefer the pathencode provided by the parsers extension module when
# available; otherwise keep the pure Python implementation above
_pathencode = getattr(parsers, 'pathencode', _pathencode)
267
267
def _dothybridencode(f):
    # dotencode variant of the hybrid encoder: try the (possibly
    # C-accelerated) reversible encoding first, fall back to hashed
    # encoding when it signals the path is too long (None)
    ef = _pathencode(f)
    if ef is None:
        return _hashencode(encodedir(f), True)
    return ef
273
273
def _plainhybridencode(f):
    # hybrid encoding without leading-dot/space encoding
    return _hybridencode(f, False)
276
276
def _calcmode(vfs):
    # Determine the creation mode for new store files from the mode of
    # the directory behind vfs; None means "leave the default alone".
    try:
        # files in .hg/ will be created using this mode
        mode = vfs.stat().st_mode
        # avoid some useless chmods
        if (0777 & ~util.umask) == (0777 & mode):
            mode = None
    except OSError:
        # directory not statable: fall back to default permissions
        mode = None
    return mode
287
287
# store files and directories handed out by the stores' copylist()
# methods (split on whitespace before use)
_data = ('data 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
         ' phaseroots obsstore')
290
290
class basicstore(object):
    '''base class for local repository stores'''
    def __init__(self, path, vfstype):
        vfs = vfstype(path)
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        # rawvfs bypasses name encoding; vfs applies directory-safe
        # encoding (encodedir) to every path
        self.rawvfs = vfs
        self.vfs = scmutil.filtervfs(vfs, encodedir)
        self.opener = self.vfs

    def join(self, f):
        # absolute path of f inside the store, with dir encoding applied
        return self.path + '/' + encodedir(f)

    def _walk(self, relpath, recurse):
        '''yields (unencoded, encoded, size)'''
        path = self.path
        if relpath:
            path += '/' + relpath
        striplen = len(self.path) + 1
        l = []
        if self.rawvfs.isdir(path):
            visit = [path]
            while visit:
                p = visit.pop()
                for f, kind, st in osutil.listdir(p, stat=True):
                    fp = p + '/' + f
                    # only regular revlog files (.d/.i) are reported
                    if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
                        n = util.pconvert(fp[striplen:])
                        l.append((decodedir(n), n, st.st_size))
                    elif kind == stat.S_IFDIR and recurse:
                        visit.append(fp)
        l.sort()
        return l

    def datafiles(self):
        return self._walk('data', True)

    def walk(self):
        '''yields (unencoded, encoded, size)'''
        # yield data files first
        for x in self.datafiles():
            yield x
        # yield manifest before changelog
        for x in reversed(self._walk('', False)):
            yield x

    def copylist(self):
        return ['requires'] + _data.split()

    def write(self):
        # nothing to flush for the basic store
        pass
343
343
class encodedstore(basicstore):
    # store variant whose filenames go through the full reversible
    # character encoding (encodefilename) and live under '<path>/store'
    def __init__(self, path, vfstype):
        vfs = vfstype(path + '/store')
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        self.vfs = scmutil.filtervfs(vfs, encodefilename)
        self.opener = self.vfs

    def datafiles(self):
        for a, b, size in self._walk('data', True):
            try:
                a = decodefilename(a)
            except KeyError:
                # undecodable on-disk name: report only the encoded form
                a = None
            yield a, b, size

    def join(self, f):
        return self.path + '/' + encodefilename(f)

    def copylist(self):
        return (['requires', '00changelog.i'] +
                ['store/' + f for f in _data.split()])
368
368
class fncache(object):
    # in-memory mirror of the 'fncache' file: the set of data file
    # names the store has ever created.
    # the filename used to be partially encoded
    # hence the encodedir/decodedir dance
    def __init__(self, vfs):
        self.vfs = vfs
        # lazily loaded set of names; None means "not loaded yet"
        self.entries = None
        self._dirty = False

    def _load(self):
        '''fill the entries from the fncache file'''
        self._dirty = False
        try:
            fp = self.vfs('fncache', mode='rb')
        except IOError:
            # skip nonexistent file
            self.entries = set()
            return
        self.entries = set(decodedir(fp.read()).splitlines())
        if '' in self.entries:
            # an empty entry indicates corruption; re-scan to report
            # the offending line number
            fp.seek(0)
            for n, line in enumerate(fp):
                if not line.rstrip('\n'):
                    t = _('invalid entry in fncache, line %s') % (n + 1)
                    raise util.Abort(t)
        fp.close()

    def _write(self, files, atomictemp):
        fp = self.vfs('fncache', mode='wb', atomictemp=atomictemp)
        if files:
            fp.write(encodedir('\n'.join(files) + '\n'))
        fp.close()
        self._dirty = False

    def rewrite(self, files):
        # unconditional rewrite from an explicit file list (non-atomic)
        self._write(files, False)
        self.entries = set(files)

    def write(self):
        # flush pending additions atomically, if any
        if self._dirty:
            self._write(self.entries, True)

    def add(self, fn):
        if self.entries is None:
            self._load()
        if fn not in self.entries:
            self._dirty = True
            self.entries.add(fn)

    def __contains__(self, fn):
        if self.entries is None:
            self._load()
        return fn in self.entries

    def __iter__(self):
        if self.entries is None:
            self._load()
        return iter(self.entries)
426
426
class _fncachevfs(scmutil.abstractvfs):
    # vfs wrapper that registers every newly written data file in the
    # fncache and applies the given path encoder before hitting disk
    def __init__(self, vfs, fnc, encode):
        self.vfs = vfs
        self.fncache = fnc
        self.encode = encode

    def _getmustaudit(self):
        # delegate the audit flag to the wrapped vfs
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)

    def __call__(self, path, mode='r', *args, **kw):
        # opening a data file for anything but reading records it in
        # the fncache so it can be enumerated later
        if mode not in ('r', 'rb') and path.startswith('data/'):
            self.fncache.add(path)
        return self.vfs(self.encode(path), mode, *args, **kw)

    def join(self, path):
        if path:
            return self.vfs.join(self.encode(path))
        else:
            return self.vfs.join(path)
451
451
class fncachestore(basicstore):
    '''Store flavor that tracks its data files in an fncache file.

    File names are mapped through a hybrid encoder (dot-encoded or
    plain, chosen at construction time), and every newly created data
    file is recorded via _fncachevfs.
    '''

    def __init__(self, path, vfstype, dotencode):
        # pick the filename encoder once, up front
        if dotencode:
            self.encode = _dothybridencode
        else:
            self.encode = _plainhybridencode
        vfs = vfstype(path + '/store')
        self.path = vfs.base
        self.pathsep = self.path + '/'
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        fnc = fncache(vfs)
        self.fncache = fnc
        self.vfs = _fncachevfs(vfs, fnc, self.encode)
        self.opener = self.vfs

    def join(self, f):
        '''Return the encoded on-disk path for store file *f*.'''
        return self.pathsep + self.encode(f)

    def getsize(self, path):
        '''Return the size in bytes of *path* (already-encoded).'''
        return self.rawvfs.stat(path).st_size

    def datafiles(self):
        '''Yield (name, encoded name, size) for each cached data file.

        Entries whose backing file no longer exists are dropped, and
        the fncache is rewritten once iteration completes if any were
        found.
        '''
        sawstale = False
        existing = []
        for f in sorted(self.fncache):
            ef = self.encode(f)
            try:
                yield f, ef, self.getsize(ef)
                existing.append(f)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
                # nonexistent entry
                sawstale = True
        if sawstale:
            # rewrite fncache to remove nonexistent entries
            # (may be caused by rollback / strip)
            self.fncache.rewrite(existing)

    def copylist(self):
        '''List the files to copy when cloning this store, relative to
        the repository root.'''
        storefiles = ('data dh fncache phaseroots obsstore'
                      ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
        result = ['requires', '00changelog.i']
        for f in storefiles.split():
            result.append('store/' + f)
        return result

    def write(self):
        '''Flush any pending fncache additions to disk.'''
        self.fncache.write()
502
502
def store(requirements, path, vfstype):
    '''Instantiate the store implementation matching *requirements*.

    Falls back from fncachestore (with optional dotencode) through
    encodedstore to the plain basicstore.
    '''
    if 'store' not in requirements:
        return basicstore(path, vfstype)
    if 'fncache' in requirements:
        return fncachestore(path, vfstype, 'dotencode' in requirements)
    return encodedstore(path, vfstype)
General Comments 0
You need to be logged in to leave comments. Login now