##// END OF EJS Templates
replace xrange(0, n) with xrange(n)
Martin Geisler -
r8624:2b3dec0e default
parent child Browse files
Show More
@@ -1,270 +1,270
1 1 # color.py color output for the status and qseries commands
2 2 #
3 3 # Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com>
4 4 #
5 5 # This program is free software; you can redistribute it and/or modify it
6 6 # under the terms of the GNU General Public License as published by the
7 7 # Free Software Foundation; either version 2 of the License, or (at your
8 8 # option) any later version.
9 9 #
10 10 # This program is distributed in the hope that it will be useful, but
11 11 # WITHOUT ANY WARRANTY; without even the implied warranty of
12 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
13 13 # Public License for more details.
14 14 #
15 15 # You should have received a copy of the GNU General Public License along
16 16 # with this program; if not, write to the Free Software Foundation, Inc.,
17 17 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 18
19 19 '''add color output to status, qseries, and diff-related commands
20 20
21 21 This extension modifies the status command to add color to its output
22 22 to reflect file status, the qseries command to add color to reflect
23 23 patch status (applied, unapplied, missing), and to diff-related
24 24 commands to highlight additions, removals, diff headers, and trailing
25 25 whitespace.
26 26
27 27 Other effects in addition to color, like bold and underlined text, are
28 28 also available. Effects are rendered with the ECMA-48 SGR control
29 29 function (aka ANSI escape codes). This module also provides the
30 30 render_text function, which can be used to add effects to any text.
31 31
32 32 To enable this extension, add this to your .hgrc file:
33 33 [extensions]
34 34 color =
35 35
36 36 Default effects may be overridden from the .hgrc file:
37 37
38 38 [color]
39 39 status.modified = blue bold underline red_background
40 40 status.added = green bold
41 41 status.removed = red bold blue_background
42 42 status.deleted = cyan bold underline
43 43 status.unknown = magenta bold underline
44 44 status.ignored = black bold
45 45
46 46 # 'none' turns off all effects
47 47 status.clean = none
48 48 status.copied = none
49 49
50 50 qseries.applied = blue bold underline
51 51 qseries.unapplied = black bold
52 52 qseries.missing = red bold
53 53
54 54 diff.diffline = bold
55 55 diff.extended = cyan bold
56 56 diff.file_a = red bold
57 57 diff.file_b = green bold
58 58 diff.hunk = magenta
59 59 diff.deleted = red
60 60 diff.inserted = green
61 61 diff.changed = white
62 62 diff.trailingwhitespace = bold red_background
63 63 '''
64 64
65 65 import os, sys
66 66
67 67 from mercurial import cmdutil, commands, extensions
68 68 from mercurial.i18n import _
69 69
# SGR (Select Graphic Rendition) parameter for each supported effect name.
# 'none' resets all attributes; 30-37 are foregrounds, 40-47 backgrounds.
_effect_params = {'none': 0,
                  'black': 30,
                  'red': 31,
                  'green': 32,
                  'yellow': 33,
                  'blue': 34,
                  'magenta': 35,
                  'cyan': 36,
                  'white': 37,
                  'bold': 1,
                  'italic': 3,
                  'underline': 4,
                  'inverse': 7,
                  'black_background': 40,
                  'red_background': 41,
                  'green_background': 42,
                  'yellow_background': 43,
                  'blue_background': 44,
                  'purple_background': 45,
                  'cyan_background': 46,
                  'white_background': 47}

def render_effects(text, effects):
    'Wrap text in commands to turn on each effect.'
    # always lead with a reset so earlier attributes cannot leak in
    codes = [_effect_params[name] for name in ['none'] + effects]
    start = '\033[%sm' % ';'.join([str(c) for c in codes])
    stop = '\033[%dm' % _effect_params['none']
    return start + text + stop
99 99
def colorstatus(orig, ui, repo, *pats, **opts):
    '''run the status command with colored output

    Captures the plain output of the wrapped status command, then
    re-emits each line wrapped in the effects configured for its
    status letter.'''

    # --print0 separates entries with NUL instead of newline
    delimiter = opts['print0'] and '\0' or '\n'

    nostatus = opts.get('no_status')
    opts['no_status'] = False
    # run status and capture its output
    ui.pushbuffer()
    retval = orig(ui, repo, *pats, **opts)
    # filter out empty strings
    lines_with_status = [line for line in ui.popbuffer().split(delimiter)
                         if line]

    if nostatus:
        # drop the two-character "X " status prefix the user asked to hide
        lines = [l[2:] for l in lines_with_status]
    else:
        lines = lines_with_status

    # apply color to output and display it; iterate the stripped and
    # unstripped lines in lockstep (instead of indexing with xrange)
    # because the status letter must always come from the unstripped line
    for line, line_with_status in zip(lines, lines_with_status):
        status = _status_abbreviations[line_with_status[0]]
        effects = _status_effects[status]
        if effects:
            line = render_effects(line, effects)
        ui.write(line + delimiter)
    return retval
126 126
# map the one-letter prefix of a status line to the symbolic status
# name used as key in _status_effects (and in [color] config entries)
_status_abbreviations = { 'M': 'modified',
                          'A': 'added',
                          'R': 'removed',
                          '!': 'deleted',
                          '?': 'unknown',
                          'I': 'ignored',
                          'C': 'clean',
                          ' ': 'copied', }

# default effects per status; overridden from [color] status.* entries
# by _setupcmd at extension setup time
_status_effects = { 'modified': ['blue', 'bold'],
                    'added': ['green', 'bold'],
                    'removed': ['red', 'bold'],
                    'deleted': ['cyan', 'bold', 'underline'],
                    'unknown': ['magenta', 'bold', 'underline'],
                    'ignored': ['black', 'bold'],
                    'clean': ['none'],
                    'copied': ['none'], }
144 144
def colorqseries(orig, ui, repo, *dummy, **opts):
    '''run the qseries command with colored output'''
    # capture the plain qseries output, then re-emit it with effects
    ui.pushbuffer()
    retval = orig(ui, repo, **opts)
    for line in ui.popbuffer().splitlines():
        # recover the bare patch name from the formatted line:
        # --summary appends ": <summary>", --verbose prepends "idx status "
        name = line
        if opts['summary']:
            name = name.split(': ')[0]
        if ui.verbose:
            name = name.split(' ', 2)[-1]

        if opts['missing']:
            state = 'missing'
        elif [a for a in repo.mq.applied if a.name == name]:
            # the patch name appears in mq's applied stack
            state = 'applied'
        else:
            state = 'unapplied'
        ui.write(render_effects(line, _patch_effects[state]) + '\n')
    return retval
167 167
# default effects per qseries patch state; overridden from [color]
# qseries.* entries by _setupcmd at extension setup time
_patch_effects = { 'applied': ['blue', 'bold', 'underline'],
                   'missing': ['red', 'bold'],
                   'unapplied': ['black', 'bold'], }
171 171
def colorwrap(orig, s):
    '''wrap ui.write for colored diff output'''
    colored = []
    for line in s.split('\n'):
        body = line
        if line and line[0] in '+-':
            # highlight trailing whitespace, but only in changed lines
            body = line.rstrip()
        out = line
        for prefix, style in _diff_prefixes:
            if body.startswith(prefix):
                out = render_effects(body, _diff_effects[style])
                break
        if body != line:
            # re-attach the stripped trailing whitespace, highlighted
            out += render_effects(line[len(body):],
                                  _diff_effects['trailingwhitespace'])
        colored.append(out)
    orig('\n'.join(colored))
188 188
def colorshowpatch(orig, self, node):
    '''wrap cmdutil.changeset_printer.showpatch with colored output'''
    # reroute ui.write through colorwrap for the duration of the call;
    # wrapfunction returns the previous write, restored in finally so
    # an exception while printing cannot leave ui.write wrapped
    oldwrite = extensions.wrapfunction(self.ui, 'write', colorwrap)
    try:
        orig(self, node)
    finally:
        self.ui.write = oldwrite
196 196
def colordiff(orig, ui, repo, *pats, **opts):
    '''run the diff command with colored output'''
    # same pattern as colorshowpatch: temporarily reroute ui.write
    # through colorwrap and restore it even if the wrapped diff raises
    oldwrite = extensions.wrapfunction(ui, 'write', colorwrap)
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        ui.write = oldwrite
204 204
# diff line prefixes checked in order by colorwrap; first match wins,
# so the specific '---'/'+++' entries must precede the generic '-'/'+'
_diff_prefixes = [('diff', 'diffline'),
                  ('copy', 'extended'),
                  ('rename', 'extended'),
                  ('old', 'extended'),
                  ('new', 'extended'),
                  ('deleted', 'extended'),
                  ('---', 'file_a'),
                  ('+++', 'file_b'),
                  ('@', 'hunk'),
                  ('-', 'deleted'),
                  ('+', 'inserted')]

# default effects per diff element; overridden from [color] diff.*
# entries by _setupcmd at extension setup time
_diff_effects = {'diffline': ['bold',],
                 'extended': ['cyan', 'bold'],
                 'file_a': ['red', 'bold'],
                 'file_b': ['green', 'bold'],
                 'hunk': ['magenta',],
                 'deleted': ['red',],
                 'inserted': ['green',],
                 'changed': ['white',],
                 'trailingwhitespace': ['bold', 'red_background'],}
226 226
def uisetup(ui):
    '''Initialize the extension.'''
    # 'diff' gets its output wrapped directly; the log-style commands
    # only need their showpatch output colorized (func=None)
    _setupcmd(ui, 'diff', commands.table, colordiff, _diff_effects)
    for cmd in ('incoming', 'log', 'outgoing', 'tip'):
        _setupcmd(ui, cmd, commands.table, None, _diff_effects)
    _setupcmd(ui, 'status', commands.table, colorstatus, _status_effects)
    try:
        mq = extensions.find('mq')
        _setupcmd(ui, 'qdiff', mq.cmdtable, colordiff, _diff_effects)
        _setupcmd(ui, 'qseries', mq.cmdtable, colorqseries, _patch_effects)
    except KeyError:
        # The mq extension is not enabled
        pass
242 242
def _setupcmd(ui, cmd, table, func, effectsmap):
    '''patch in command to command table and load effect map

    Wraps `cmd` in `table` with a closure that decides at run time
    whether to colorize, adds --color/--no-color options to the
    command, and replaces default entries in `effectsmap` (mutated in
    place) with any [color] overrides from the configuration.'''
    def nocolor(orig, *args, **opts):

        # fall through to the plain command when color is disabled:
        # explicitly, by --color=never, or by --color=auto on a dumb
        # terminal / non-tty stdout
        if (opts['no_color'] or opts['color'] == 'never' or
            (opts['color'] == 'auto' and (os.environ.get('TERM') == 'dumb'
                                          or not sys.__stdout__.isatty()))):
            return orig(*args, **opts)

        # colorize any patch text printed while the command runs;
        # restored in finally so the class-level method is never left
        # wrapped after an exception
        oldshowpatch = extensions.wrapfunction(cmdutil.changeset_printer,
                                               'showpatch', colorshowpatch)
        try:
            if func is not None:
                return func(orig, *args, **opts)
            return orig(*args, **opts)
        finally:
            cmdutil.changeset_printer.showpatch = oldshowpatch

    entry = extensions.wrapcommand(table, cmd, nocolor)
    # entry[1] is the command's option list
    entry[1].extend([
        ('', 'color', 'auto', _("when to colorize (always, auto, or never)")),
        ('', 'no-color', None, _("don't colorize output")),
    ])

    # let [color] config entries (e.g. "status.modified") override the
    # built-in defaults for this command
    for status in effectsmap:
        effects = ui.configlist('color', cmd + '.' + status)
        if effects:
            effectsmap[status] = effects
@@ -1,359 +1,359
1 1 # Minimal support for git commands on an hg repository
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 '''browsing the repository in a graphical way
9 9
10 10 The hgk extension allows browsing the history of a repository in a
11 11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
12 12 distributed with Mercurial.)
13 13
14 14 hgk consists of two parts: a Tcl script that does the displaying and
15 15 querying of information, and an extension to mercurial named hgk.py,
16 16 which provides hooks for hgk to get information. hgk can be found in
17 17 the contrib directory, and hgk.py can be found in the hgext directory.
18 18
19 19 To load the hgk.py extension, add it to your .hgrc file (you have to
20 20 use your global $HOME/.hgrc file, not one in a repository). You can
21 21 specify an absolute path:
22 22
23 23 [extensions]
24 24 hgk=/usr/local/lib/hgk.py
25 25
26 26 Mercurial can also scan the default python library path for a file
27 27 named 'hgk.py' if you set hgk empty:
28 28
29 29 [extensions]
30 30 hgk=
31 31
32 32 The hg view command will launch the hgk Tcl script. For this command
33 33 to work, hgk must be in your search path. Alternately, you can specify
34 34 the path to hgk in your .hgrc file:
35 35
36 36 [hgk]
37 37 path=/location/of/hgk
38 38
39 39 hgk can make use of the extdiff extension to visualize revisions.
40 40 Assuming you had already configured extdiff vdiff command, just add:
41 41
42 42 [hgk]
43 43 vdiff=vdiff
44 44
45 45 Revisions context menu will now display additional entries to fire
46 46 vdiff on hovered and selected revisions.'''
47 47
48 48 import os
49 49 from mercurial import commands, util, patch, revlog, cmdutil
50 50 from mercurial.node import nullid, nullrev, short
51 51 from mercurial.i18n import _
52 52
def difftree(ui, repo, node1=None, node2=None, *files, **opts):
    """diff trees from two commits"""
    def __difftree(repo, node1, node2, files=[]):
        # git-diff-tree style raw status lines between two manifests;
        # NOTE(review): mutable default `files=[]` is only ever read
        # here, so the usual shared-default pitfall does not bite
        assert node2 is not None
        mmap = repo[node1].manifest()
        mmap2 = repo[node2].manifest()
        m = cmdutil.match(repo, files)
        modified, added, removed = repo.status(node1, node2, m)[:3]
        empty = short(nullid)

        for f in modified:
            # TODO get file permissions
            ui.write(":100664 100664 %s %s M\t%s\t%s\n" %
                     (short(mmap[f]), short(mmap2[f]), f, f))
        for f in added:
            ui.write(":000000 100664 %s %s N\t%s\t%s\n" %
                     (empty, short(mmap2[f]), f, f))
        for f in removed:
            ui.write(":100664 000000 %s %s D\t%s\t%s\n" %
                     (short(mmap[f]), empty, f, f))
    ##

    # with --stdin, read "node1 [node2]" pairs until EOF; otherwise run
    # once on the command-line arguments
    while True:
        if opts['stdin']:
            try:
                line = raw_input().split(' ')
                node1 = line[0]
                if len(line) > 1:
                    node2 = line[1]
                else:
                    node2 = None
            except EOFError:
                break
        node1 = repo.lookup(node1)
        if node2:
            node2 = repo.lookup(node2)
        else:
            # single node given: diff it against its first parent
            node2 = node1
            node1 = repo.changelog.parents(node1)[0]
        if opts['patch']:
            if opts['pretty']:
                catcommit(ui, repo, node2, "")
            m = cmdutil.match(repo, files)
            chunks = patch.diff(repo, node1, node2, match=m,
                                opts=patch.diffopts(ui, {'git': True}))
            for chunk in chunks:
                ui.write(chunk)
        else:
            __difftree(repo, node1, node2, files=files)
        if not opts['stdin']:
            break
104 104
def catcommit(ui, repo, n, prefix, ctx=None):
    '''print a commit in git cat-file style, each body line indented
    with `prefix`; a trailing NUL is emitted when prefix is non-empty
    (stdin mode) so the Tcl frontend can find record boundaries'''
    nlprefix = '\n' + prefix;
    if ctx is None:
        ctx = repo[n]
    ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ??
    for p in ctx.parents():
        ui.write("parent %s\n" % p)

    date = ctx.date()
    description = ctx.description().replace("\0", "")
    lines = description.splitlines()
    # a "committer:" trailer in the description overrides the author
    if lines and lines[-1].startswith('committer:'):
        committer = lines[-1].split(': ')[1].rstrip()
    else:
        committer = ctx.user()

    ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
    ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
    ui.write("revision %d\n" % ctx.rev())
    ui.write("branch %s\n\n" % ctx.branch())

    if prefix != "":
        # indent every description line with the prefix
        ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip()))
    else:
        ui.write(description + "\n")
    if prefix:
        ui.write('\0')
132 132
def base(ui, repo, node1, node2):
    """output common ancestor information"""
    # resolve both revision specs, then print the short hash of their
    # greatest common ancestor in the changelog
    ancestor = repo.changelog.ancestor(repo.lookup(node1),
                                       repo.lookup(node2))
    ui.write("%s\n" % short(ancestor))
139 139
def catfile(ui, repo, type=None, r=None, **opts):
    """cat a specific revision"""
    # in stdin mode, every line except the commit is prefixed with two
    # spaces. This way our caller can find the commit without magic
    # strings
    #
    prefix = ""
    if opts['stdin']:
        try:
            (type, r) = raw_input().split(' ');
            prefix = "    "
        except EOFError:
            return

    else:
        if not type or not r:
            ui.warn(_("cat-file: type or revision not supplied\n"))
            commands.help_(ui, 'cat-file')

    # in stdin mode keep reading "type rev" pairs until EOF; otherwise
    # process the single pair from the command line
    while r:
        if type != "commit":
            # only commits are supported; return non-zero exit status
            ui.warn(_("aborting hg cat-file only understands commits\n"))
            return 1;
        n = repo.lookup(r)
        catcommit(ui, repo, n, prefix)
        if opts['stdin']:
            try:
                (type, r) = raw_input().split(' ');
            except EOFError:
                break
        else:
            break
172 172
173 173 # git rev-tree is a confusing thing. You can supply a number of
174 174 # commit sha1s on the command line, and it walks the commit history
175 175 # telling you which commits are reachable from the supplied ones via
176 176 # a bitmask based on arg position.
177 177 # you can specify a commit to stop at by starting the sha1 with ^
def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
    '''emulate git rev-tree: walk the changelog from newest to oldest,
    printing each commit reachable from the wanted revisions together
    with a bitmask telling which argument(s) reach it; args starting
    with '^' mark commits to stop at'''
    def chlogwalk():
        # yield (rev, ctx-or-None) newest-first, reading changesets in
        # chunks of 100 to bound memory use
        count = len(repo)
        i = count
        l = [0] * 100
        chunk = 100
        while True:
            if chunk > i:
                chunk = i
                i = 0
            else:
                i -= chunk

            for x in xrange(chunk):
                if i + x >= count:
                    # past the end: zero out the unused tail of the buffer
                    l[chunk - x:] = [0] * (chunk - x)
                    break
                if full != None:
                    l[x] = repo[i + x]
                    l[x].changeset() # force reading
                else:
                    l[x] = 1
            # walk the chunk backwards so overall order is newest-first
            for x in xrange(chunk-1, -1, -1):
                if l[x] != 0:
                    yield (i + x, full != None and l[x] or None)
            if i == 0:
                break

    # calculate and return the reachability bitmask for sha
    def is_reachable(ar, reachable, sha):
        if len(ar) == 0:
            return 1
        mask = 0
        for i in xrange(len(ar)):
            if sha in reachable[i]:
                mask |= 1 << i

        return mask

    reachable = []
    stop_sha1 = []
    want_sha1 = []
    count = 0

    # figure out which commits they are asking for and which ones they
    # want us to stop on
    for i in xrange(len(args)):
        if args[i].startswith('^'):
            s = repo.lookup(args[i][1:])
            stop_sha1.append(s)
            want_sha1.append(s)
        elif args[i] != 'HEAD':
            want_sha1.append(repo.lookup(args[i]))

    # calculate the graph for the supplied commits: BFS over parents,
    # one reachable-set per wanted commit
    for i in xrange(len(want_sha1)):
        reachable.append(set());
        n = want_sha1[i];
        visit = [n];
        reachable[i].add(n)
        while visit:
            n = visit.pop(0)
            if n in stop_sha1:
                continue
            for p in repo.changelog.parents(n):
                if p not in reachable[i]:
                    reachable[i].add(p)
                    visit.append(p)
                # NOTE(review): this continue is the last statement of
                # the loop body, so it has no effect
                if p in stop_sha1:
                    continue

    # walk the repository looking for commits that are in our
    # reachability graph
    for i, ctx in chlogwalk():
        n = repo.changelog.node(i)
        mask = is_reachable(want_sha1, reachable, n)
        if mask:
            parentstr = ""
            if parents:
                pp = repo.changelog.parents(n)
                if pp[0] != nullid:
                    parentstr += " " + short(pp[0])
                if pp[1] != nullid:
                    parentstr += " " + short(pp[1])
            if not full:
                ui.write("%s%s\n" % (short(n), parentstr))
            elif full == "commit":
                ui.write("%s%s\n" % (short(n), parentstr))
                catcommit(ui, repo, n, '    ', ctx)
            else:
                (p1, p2) = repo.changelog.parents(n)
                (h, h1, h2) = map(short, (n, p1, p2))
                (i1, i2) = map(repo.changelog.rev, (p1, p2))

                date = ctx.date()[0]
                ui.write("%s %s:%s" % (date, h, mask))
                # also print each real parent with its own mask
                mask = is_reachable(want_sha1, reachable, p1)
                if i1 != nullrev and mask > 0:
                    ui.write("%s:%s " % (h1, mask)),
                mask = is_reachable(want_sha1, reachable, p2)
                if i2 != nullrev and mask > 0:
                    ui.write("%s:%s " % (h2, mask))
                ui.write("\n")
            if maxnr and count >= maxnr:
                break
            count += 1
284 284
def revparse(ui, repo, *revs, **opts):
    """parse given revisions"""
    def revstr(rev):
        # git spells the current tip 'HEAD'
        if rev == 'HEAD':
            rev = 'tip'
        return revlog.hex(repo.lookup(rev))

    for spec in revs:
        # a "a:b" range prints the start hash, then the end hash
        # prefixed with '^' (git rev-parse exclusion syntax)
        parts = spec.split(':', 1)
        ui.write('%s\n' % revstr(parts[0]))
        if len(parts) == 2:
            ui.write('^%s\n' % revstr(parts[1]))
297 297
298 298 # git rev-list tries to order things by date, and has the ability to stop
299 299 # at a given commit without walking the whole repo. TODO add the stop
300 300 # parameter
def revlist(ui, repo, *revs, **opts):
    """print revisions"""
    # --header switches revtree into full "commit" output mode
    full = opts['header'] and "commit" or None
    revtree(ui, list(revs), repo, full, opts['max_count'], opts['parents'])
309 309
def config(ui, repo, **opts):
    """print extension options"""
    # emit each option as a k=/v= pair for the hgk Tcl script to parse
    vdiff = ui.config('hgk', 'vdiff', '')
    ui.write('k=%s\nv=%s\n' % ('vdiff', vdiff))
316 316
317 317
def view(ui, repo, *etc, **opts):
    "start interactive history viewer"
    # hgk expects to run from the repository root; note this changes
    # the process-wide working directory
    os.chdir(repo.root)
    optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
    # NOTE(review): the command line is built by string concatenation
    # from config and arguments and handed to a shell
    cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
    ui.debug(_("running %s\n") % cmd)
    util.system(cmd)
325 325
# command table: maps command name to (function, options, synopsis);
# '^view' is the user-facing entry, the debug-* commands exist for the
# hgk Tcl script to query repository information
cmdtable = {
    "^view":
        (view,
         [('l', 'limit', '', _('limit number of changes displayed'))],
         _('hg view [-l LIMIT] [REVRANGE]')),
    "debug-diff-tree":
        (difftree,
         [('p', 'patch', None, _('generate patch')),
          ('r', 'recursive', None, _('recursive')),
          ('P', 'pretty', None, _('pretty')),
          ('s', 'stdin', None, _('stdin')),
          ('C', 'copy', None, _('detect copies')),
          ('S', 'search', "", _('search'))],
         _('hg git-diff-tree [OPTION]... NODE1 NODE2 [FILE]...')),
    "debug-cat-file":
        (catfile,
         [('s', 'stdin', None, _('stdin'))],
         _('hg debug-cat-file [OPTION]... TYPE FILE')),
    "debug-config":
        (config, [], _('hg debug-config')),
    "debug-merge-base":
        (base, [], _('hg debug-merge-base node node')),
    "debug-rev-parse":
        (revparse,
         [('', 'default', '', _('ignored'))],
         _('hg debug-rev-parse REV')),
    "debug-rev-list":
        (revlist,
         [('H', 'header', None, _('header')),
          ('t', 'topo-order', None, _('topo-order')),
          ('p', 'parents', None, _('parents')),
          ('n', 'max-count', 0, _('max-count'))],
         _('hg debug-rev-list [options] revs')),
}
@@ -1,2637 +1,2637
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 '''patch management and development
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use "hg help command" for more details):
18 18
19 19 prepare repository to work with patches qinit
20 20 create new patch qnew
21 21 import existing patch qimport
22 22
23 23 print patch series qseries
24 24 print applied patches qapplied
25 25 print name of top applied patch qtop
26 26
27 27 add known patch to applied stack qpush
28 28 remove patch from applied stack qpop
29 29 refresh contents of top applied patch qrefresh
30 30 '''
31 31
32 32 from mercurial.i18n import _
33 33 from mercurial.node import bin, hex, short, nullid, nullrev
34 34 from mercurial.lock import release
35 35 from mercurial import commands, cmdutil, hg, patch, util
36 36 from mercurial import repair, extensions, url, error
37 37 import os, sys, re, errno
38 38
39 39 commands.norepo += " qclone"
40 40
41 41 # Patch names looks like unix-file names.
42 42 # They must be joinable with queue directory and result in the patch path.
43 43 normname = util.normpath
44 44
class statusentry:
    '''one entry of the mq status file, serialized as "rev:name"'''
    def __init__(self, rev, name=None):
        if name:
            # both fields supplied directly
            self.rev, self.name = rev, name
            return
        # parse a serialized "rev:name" line; split only on the first
        # colon so patch names may themselves contain colons
        parts = rev.split(':', 1)
        if len(parts) == 2:
            self.rev, self.name = parts[0], parts[1]
        else:
            # malformed line: leave both fields unset
            self.rev = self.name = None

    def __str__(self):
        return self.rev + ':' + self.name
58 58
class patchheader(object):
    '''the parsed header of an mq patch file: commit message, raw
    comment lines, user, date, and whether a diff body follows'''
    def __init__(self, message, comments, user, date, haspatch):
        self.message = message    # list of commit-message lines
        self.comments = comments  # all header lines, message included
        self.user = user
        self.date = date
        self.haspatch = haspatch  # True if the file contains a diff

    def setuser(self, user):
        '''Set the user, updating an existing From:/# User header or
        inserting one (after "# HG changeset patch" if present,
        otherwise email-style at the top).'''
        if not self.setheader(['From: ', '# User '], user):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1,'# User ' + user)
            except ValueError:
                self.comments = ['From: ' + user, ''] + self.comments
            self.user = user

    def setdate(self, date):
        # unlike setuser, only updates an existing "# Date " header;
        # no header is inserted when none exists
        if self.setheader(['# Date '], date):
            self.date = date

    def setmessage(self, message):
        # replace the current message (removed from comments first)
        # with the single new message line
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def setheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        If none found, add it email style.'''
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def __str__(self):
        # full header text, blank-line-terminated, ready to prepend to
        # the diff body; empty when there are no comment lines
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    # drop the subject line and the following blank line
                    # from the message as well
                    self.message = self.message[2:]
                    break
        ci = 0
        # delete each remaining message line from comments, scanning
        # forward for the matching comment line each time
        for mi in xrange(len(self.message)):
            while self.message[mi] != self.comments[ci]:
                ci += 1
            del self.comments[ci]
119 119
120 120 class queue:
121 121 def __init__(self, ui, path, patchdir=None):
122 122 self.basepath = path
123 123 self.path = patchdir or os.path.join(path, "patches")
124 124 self.opener = util.opener(self.path)
125 125 self.ui = ui
126 126 self.applied_dirty = 0
127 127 self.series_dirty = 0
128 128 self.series_path = "series"
129 129 self.status_path = "status"
130 130 self.guards_path = "guards"
131 131 self.active_guards = None
132 132 self.guards_dirty = False
133 133 self._diffopts = None
134 134
135 135 @util.propertycache
136 136 def applied(self):
137 137 if os.path.exists(self.join(self.status_path)):
138 138 lines = self.opener(self.status_path).read().splitlines()
139 139 return [statusentry(l) for l in lines]
140 140 return []
141 141
142 142 @util.propertycache
143 143 def full_series(self):
144 144 if os.path.exists(self.join(self.series_path)):
145 145 return self.opener(self.series_path).read().splitlines()
146 146 return []
147 147
148 148 @util.propertycache
149 149 def series(self):
150 150 self.parse_series()
151 151 return self.series
152 152
153 153 @util.propertycache
154 154 def series_guards(self):
155 155 self.parse_series()
156 156 return self.series_guards
157 157
158 158 def invalidate(self):
159 159 for a in 'applied full_series series series_guards'.split():
160 160 if a in self.__dict__:
161 161 delattr(self, a)
162 162 self.applied_dirty = 0
163 163 self.series_dirty = 0
164 164 self.guards_dirty = False
165 165 self.active_guards = None
166 166
167 167 def diffopts(self):
168 168 if self._diffopts is None:
169 169 self._diffopts = patch.diffopts(self.ui)
170 170 return self._diffopts
171 171
172 172 def join(self, *p):
173 173 return os.path.join(self.path, *p)
174 174
175 175 def find_series(self, patch):
176 176 pre = re.compile("(\s*)([^#]+)")
177 177 index = 0
178 178 for l in self.full_series:
179 179 m = pre.match(l)
180 180 if m:
181 181 s = m.group(2)
182 182 s = s.rstrip()
183 183 if s == patch:
184 184 return index
185 185 index += 1
186 186 return None
187 187
188 188 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
189 189
190 190 def parse_series(self):
191 191 self.series = []
192 192 self.series_guards = []
193 193 for l in self.full_series:
194 194 h = l.find('#')
195 195 if h == -1:
196 196 patch = l
197 197 comment = ''
198 198 elif h == 0:
199 199 continue
200 200 else:
201 201 patch = l[:h]
202 202 comment = l[h:]
203 203 patch = patch.strip()
204 204 if patch:
205 205 if patch in self.series:
206 206 raise util.Abort(_('%s appears more than once in %s') %
207 207 (patch, self.join(self.series_path)))
208 208 self.series.append(patch)
209 209 self.series_guards.append(self.guard_re.findall(comment))
210 210
211 211 def check_guard(self, guard):
212 212 if not guard:
213 213 return _('guard cannot be an empty string')
214 214 bad_chars = '# \t\r\n\f'
215 215 first = guard[0]
216 216 if first in '-+':
217 217 return (_('guard %r starts with invalid character: %r') %
218 218 (guard, first))
219 219 for c in bad_chars:
220 220 if c in guard:
221 221 return _('invalid character in guard %r: %r') % (guard, c)
222 222
223 223 def set_active(self, guards):
224 224 for guard in guards:
225 225 bad = self.check_guard(guard)
226 226 if bad:
227 227 raise util.Abort(bad)
228 228 guards = sorted(set(guards))
229 229 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
230 230 self.active_guards = guards
231 231 self.guards_dirty = True
232 232
233 233 def active(self):
234 234 if self.active_guards is None:
235 235 self.active_guards = []
236 236 try:
237 237 guards = self.opener(self.guards_path).read().split()
238 238 except IOError, err:
239 239 if err.errno != errno.ENOENT: raise
240 240 guards = []
241 241 for i, guard in enumerate(guards):
242 242 bad = self.check_guard(guard)
243 243 if bad:
244 244 self.ui.warn('%s:%d: %s\n' %
245 245 (self.join(self.guards_path), i + 1, bad))
246 246 else:
247 247 self.active_guards.append(guard)
248 248 return self.active_guards
249 249
250 250 def set_guards(self, idx, guards):
251 251 for g in guards:
252 252 if len(g) < 2:
253 253 raise util.Abort(_('guard %r too short') % g)
254 254 if g[0] not in '-+':
255 255 raise util.Abort(_('guard %r starts with invalid char') % g)
256 256 bad = self.check_guard(g[1:])
257 257 if bad:
258 258 raise util.Abort(bad)
259 259 drop = self.guard_re.sub('', self.full_series[idx])
260 260 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
261 261 self.parse_series()
262 262 self.series_dirty = True
263 263
264 264 def pushable(self, idx):
265 265 if isinstance(idx, str):
266 266 idx = self.series.index(idx)
267 267 patchguards = self.series_guards[idx]
268 268 if not patchguards:
269 269 return True, None
270 270 guards = self.active()
271 271 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
272 272 if exactneg:
273 273 return False, exactneg[0]
274 274 pos = [g for g in patchguards if g[0] == '+']
275 275 exactpos = [g for g in pos if g[1:] in guards]
276 276 if pos:
277 277 if exactpos:
278 278 return True, exactpos[0]
279 279 return False, pos
280 280 return True, ''
281 281
282 282 def explain_pushable(self, idx, all_patches=False):
283 283 write = all_patches and self.ui.write or self.ui.warn
284 284 if all_patches or self.ui.verbose:
285 285 if isinstance(idx, str):
286 286 idx = self.series.index(idx)
287 287 pushable, why = self.pushable(idx)
288 288 if all_patches and pushable:
289 289 if why is None:
290 290 write(_('allowing %s - no guards in effect\n') %
291 291 self.series[idx])
292 292 else:
293 293 if not why:
294 294 write(_('allowing %s - no matching negative guards\n') %
295 295 self.series[idx])
296 296 else:
297 297 write(_('allowing %s - guarded by %r\n') %
298 298 (self.series[idx], why))
299 299 if not pushable:
300 300 if why:
301 301 write(_('skipping %s - guarded by %r\n') %
302 302 (self.series[idx], why))
303 303 else:
304 304 write(_('skipping %s - no matching guards\n') %
305 305 self.series[idx])
306 306
307 307 def save_dirty(self):
308 308 def write_list(items, path):
309 309 fp = self.opener(path, 'w')
310 310 for i in items:
311 311 fp.write("%s\n" % i)
312 312 fp.close()
313 313 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
314 314 if self.series_dirty: write_list(self.full_series, self.series_path)
315 315 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
316 316
    def readheaders(self, patch):
        """Parse a patch file's header into a patchheader object.

        Recognizes both 'hg export' style headers ('# HG changeset
        patch' with '# User'/'# Date' lines) and plain mail-style
        'Subject:'/'From:' tags.  Returns patchheader(message,
        comments, user, date, haspatch) where haspatch reflects
        whether real diff content (not just a '--- ' line) was seen.
        """
        # strip trailing diff-introduction lines from a header list
        def eatdiff(lines):
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        # strip trailing blank lines from a header list
        def eatempty(lines):
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = self.join(patch)
        message = []
        comments = []
        user = None
        date = None
        format = None   # parser state: None, "hgpatch", "tag", "tagdone"
        subject = None
        diffstart = 0   # 0: no diff seen, 1: saw '--- ', 2: confirmed diff

        # NOTE: file() is the Python 2 builtin open
        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                # a '--- ' line only counts as a diff if '+++ ' follows
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return patchheader(message, comments, user, date, diffstart > 1)
393 393
    def removeundo(self, repo):
        """Delete the repository's undo file, if present.

        NOTE(review): presumably this keeps 'hg rollback' from undoing
        mq's internal commits/strips — confirm against callers.
        Failure to unlink is reported as a warning, not raised.
        """
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn(_('error removing undo: %s\n') % str(inst))
402 402
403 403 def printdiff(self, repo, node1, node2=None, files=None,
404 404 fp=None, changes=None, opts={}):
405 405 m = cmdutil.match(repo, files, opts)
406 406 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
407 407 write = fp is None and repo.ui.write or fp.write
408 408 for chunk in chunks:
409 409 write(chunk)
410 410
    def mergeone(self, repo, mergeq, head, patch, rev):
        """Bring one patch from mergeq into this queue, merging if needed.

        First tries a strict apply on top of head; on failure the
        partial result is stripped, rev is merged in, and the patch
        file is regenerated from the resulting merge commit.
        Returns (err, node) where node is the new head.
        """
        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, n, update=False, backup='strip')

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        n = repo.commit(None, ctx.description(), ctx.user(), force=1)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        try:
            ph = mergeq.readheaders(patch)
        except:
            raise util.Abort(_("unable to read %s") % patch)

        # rewrite the patch file: original header comments + merged diff
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
448 448
449 449 def qparents(self, repo, rev=None):
450 450 if rev is None:
451 451 (p1, p2) = repo.dirstate.parents()
452 452 if p2 == nullid:
453 453 return p1
454 454 if len(self.applied) == 0:
455 455 return None
456 456 return bin(self.applied[-1].rev)
457 457 pp = repo.changelog.parents(rev)
458 458 if pp[1] != nullid:
459 459 arevs = [ x.rev for x in self.applied ]
460 460 p0 = hex(pp[0])
461 461 p1 = hex(pp[1])
462 462 if p0 in arevs:
463 463 return pp[0]
464 464 if p1 in arevs:
465 465 return pp[1]
466 466 return pp[0]
467 467
    def mergepatch(self, repo, mergeq, series):
        """Pull each patch in series from mergeq via mergeone.

        Returns (err, head); err is 1 when a patch is missing from
        mergeq or not applied there, or whatever mergeone reported.
        """
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
            self.removeundo(repo)
            self.applied.append(statusentry(hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
            if head:
                self.applied.append(statusentry(hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
506 506
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.

        patchfile: file name of patch.
        Returns (success, files, fuzz): success is False when applying
        raised; files collects the touched file names; fuzz is the
        fuzz value reported by patch.patch (False on failure).'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            return (False, files, False)

        return (True, files, fuzz)
521 521
522 522 def apply(self, repo, series, list=False, update_status=True,
523 523 strict=False, patchdir=None, merge=None, all_files={}):
524 524 wlock = lock = tr = None
525 525 try:
526 526 wlock = repo.wlock()
527 527 lock = repo.lock()
528 528 tr = repo.transaction()
529 529 try:
530 530 ret = self._apply(repo, series, list, update_status,
531 531 strict, patchdir, merge, all_files=all_files)
532 532 tr.close()
533 533 self.save_dirty()
534 534 return ret
535 535 except:
536 536 try:
537 537 tr.abort()
538 538 finally:
539 539 repo.invalidate()
540 540 repo.dirstate.invalidate()
541 541 raise
542 542 finally:
543 543 del tr
544 544 release(lock, wlock)
545 545 self.removeundo(repo)
546 546
547 547 def _apply(self, repo, series, list=False, update_status=True,
548 548 strict=False, patchdir=None, merge=None, all_files={}):
549 549 # TODO unify with commands.py
550 550 if not patchdir:
551 551 patchdir = self.path
552 552 err = 0
553 553 n = None
554 554 for patchname in series:
555 555 pushable, reason = self.pushable(patchname)
556 556 if not pushable:
557 557 self.explain_pushable(patchname, all_patches=True)
558 558 continue
559 559 self.ui.warn(_("applying %s\n") % patchname)
560 560 pf = os.path.join(patchdir, patchname)
561 561
562 562 try:
563 563 ph = self.readheaders(patchname)
564 564 except:
565 565 self.ui.warn(_("Unable to read %s\n") % patchname)
566 566 err = 1
567 567 break
568 568
569 569 message = ph.message
570 570 if not message:
571 571 message = _("imported patch %s\n") % patchname
572 572 else:
573 573 if list:
574 574 message.append(_("\nimported patch %s") % patchname)
575 575 message = '\n'.join(message)
576 576
577 577 if ph.haspatch:
578 578 (patcherr, files, fuzz) = self.patch(repo, pf)
579 579 all_files.update(files)
580 580 patcherr = not patcherr
581 581 else:
582 582 self.ui.warn(_("patch %s is empty\n") % patchname)
583 583 patcherr, files, fuzz = 0, [], 0
584 584
585 585 if merge and files:
586 586 # Mark as removed/merged and update dirstate parent info
587 587 removed = []
588 588 merged = []
589 589 for f in files:
590 590 if os.path.exists(repo.wjoin(f)):
591 591 merged.append(f)
592 592 else:
593 593 removed.append(f)
594 594 for f in removed:
595 595 repo.dirstate.remove(f)
596 596 for f in merged:
597 597 repo.dirstate.merge(f)
598 598 p1, p2 = repo.dirstate.parents()
599 599 repo.dirstate.setparents(p1, merge)
600 600
601 601 files = patch.updatedir(self.ui, repo, files)
602 602 match = cmdutil.matchfiles(repo, files or [])
603 603 n = repo.commit(files, message, ph.user, ph.date, match=match,
604 604 force=True)
605 605
606 606 if n is None:
607 607 raise util.Abort(_("repo commit failed"))
608 608
609 609 if update_status:
610 610 self.applied.append(statusentry(hex(n), patchname))
611 611
612 612 if patcherr:
613 613 self.ui.warn(_("patch failed, rejects left in working dir\n"))
614 614 err = 1
615 615 break
616 616
617 617 if fuzz and strict:
618 618 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
619 619 err = 1
620 620 break
621 621 return (err, n)
622 622
623 623 def _clean_series(self, patches):
624 624 for i in sorted([self.find_series(p) for p in patches], reverse=True):
625 625 del self.full_series[i]
626 626 self.parse_series()
627 627 self.series_dirty = 1
628 628
    def finish(self, repo, revs):
        """Move the applied patches matching revs into permanent history.

        revs must form a contiguous run starting at the first applied
        patch; otherwise an Abort is raised.  Removes the patch files
        (through the queue repo when one exists) and drops the entries
        from the applied list and series file.
        """
        firstrev = repo[self.applied[0].rev].rev()
        appliedbase = 0
        patches = []
        for rev in sorted(revs):
            if rev < firstrev:
                raise util.Abort(_('revision %d is not managed') % rev)
            # each rev must line up with the next applied patch
            base = bin(self.applied[appliedbase].rev)
            node = repo.changelog.node(rev)
            if node != base:
                raise util.Abort(_('cannot delete revision %d above '
                                   'applied patches') % rev)
            patches.append(self.applied[appliedbase].name)
            appliedbase += 1

        r = self.qrepo()
        if r:
            r.remove(patches, True)
        else:
            for p in patches:
                os.unlink(self.join(p))

        del self.applied[:appliedbase]
        self.applied_dirty = 1
        self._clean_series(patches)
654 654
    def delete(self, repo, patches, opts):
        """Remove unapplied patches (qdelete).

        patches: names to delete; opts may carry 'rev' (applied
        revisions to forget, which must be a contiguous run from the
        bottom of the applied stack) and 'keep' (leave the patch files
        on disk).  Aborts on applied or unknown patch names.
        """
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        appliedbase = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            for rev in revs:
                if appliedbase >= len(self.applied):
                    raise util.Abort(_("revision %d is not managed") % rev)

                # revs must match the applied stack from the bottom up
                base = bin(self.applied[appliedbase].rev)
                node = repo.changelog.node(rev)
                if node != base:
                    raise util.Abort(_("cannot delete revision %d above "
                                       "applied patches") % rev)
                realpatches.append(self.applied[appliedbase].name)
                appliedbase += 1

        if not opts.get('keep'):
            r = self.qrepo()
            if r:
                r.remove(realpatches, True)
            else:
                for p in realpatches:
                    os.unlink(self.join(p))

        if appliedbase:
            del self.applied[:appliedbase]
            self.applied_dirty = 1
        self._clean_series(realpatches)
701 701
702 702 def check_toppatch(self, repo):
703 703 if len(self.applied) > 0:
704 704 top = bin(self.applied[-1].rev)
705 705 pp = repo.dirstate.parents()
706 706 if top not in pp:
707 707 raise util.Abort(_("working directory revision is not qtip"))
708 708 return top
709 709 return None
710 710 def check_localchanges(self, repo, force=False, refresh=True):
711 711 m, a, r, d = repo.status()[:4]
712 712 if m or a or r or d:
713 713 if not force:
714 714 if refresh:
715 715 raise util.Abort(_("local changes found, refresh first"))
716 716 else:
717 717 raise util.Abort(_("local changes found"))
718 718 return m, a, r, d
719 719
720 720 _reserved = ('series', 'status', 'guards')
721 721 def check_reserved_name(self, name):
722 722 if (name in self._reserved or name.startswith('.hg')
723 723 or name.startswith('.mq')):
724 724 raise util.Abort(_('"%s" cannot be used as the name of a patch')
725 725 % name)
726 726
    def new(self, repo, patchfn, *pats, **opts):
        """Create a new patch from the current working-directory changes.

        options:
        msg: a string or a no-argument function returning a string
        force, user, date, include, exclude, git are also honored.
        Writes the patch file, commits the selected changes, and
        registers the patch in the series and applied lists; on any
        failure the commit is rolled back and the patch file removed.
        """
        msg = opts.get('msg')
        force = opts.get('force')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        self.check_reserved_name(patchfn)
        if os.path.exists(self.join(patchfn)):
            raise util.Abort(_('patch "%s" already exists') % patchfn)
        if opts.get('include') or opts.get('exclude') or pats:
            match = cmdutil.match(repo, pats, opts)
            # detect missing files in pats
            def badfn(f, msg):
                raise util.Abort('%s: %s' % (f, msg))
            match.bad = badfn
            m, a, r, d = repo.status(match=match)[:4]
        else:
            m, a, r, d = self.check_localchanges(repo, force)
            match = cmdutil.matchfiles(repo, m + a + r)
        commitfiles = m + a + r
        self.check_toppatch(repo)
        insert = self.full_series_end()
        wlock = repo.wlock()
        try:
            # if patch file write fails, abort early
            p = self.opener(patchfn, "w")
            try:
                if date:
                    p.write("# HG changeset patch\n")
                    if user:
                        p.write("# User " + user + "\n")
                    p.write("# Date %d %d\n\n" % date)
                elif user:
                    p.write("From: " + user + "\n\n")

                if hasattr(msg, '__call__'):
                    msg = msg()
                commitmsg = msg and msg or ("[mq]: %s" % patchfn)
                n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
                if n is None:
                    raise util.Abort(_("repo commit failed"))
                try:
                    self.full_series[insert:insert] = [patchfn]
                    self.applied.append(statusentry(hex(n), patchfn))
                    self.parse_series()
                    self.series_dirty = 1
                    self.applied_dirty = 1
                    if msg:
                        msg = msg + "\n\n"
                        p.write(msg)
                    if commitfiles:
                        diffopts = self.diffopts()
                        if opts.get('git'): diffopts.git = True
                        parent = self.qparents(repo, n)
                        chunks = patch.diff(repo, node1=parent, node2=n,
                                            match=match, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    wlock.release()
                    wlock = None
                    r = self.qrepo()
                    if r: r.add([patchfn])
                except:
                    # undo the commit if bookkeeping or writing failed
                    repo.rollback()
                    raise
            except Exception:
                # remove the partially-written patch file
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
            self.removeundo(repo)
        finally:
            release(wlock)
807 807
    def strip(self, repo, rev, update=True, backup="all", force=None):
        """Remove rev and its descendants from the repository.

        When update is true, first move the working directory to rev's
        queue parent (aborting on local changes unless force).  backup
        is forwarded to repair.strip.
        """
        wlock = lock = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.check_localchanges(repo, force=force, refresh=False)
                urev = self.qparents(repo, rev)
                hg.clean(repo, urev)
                repo.dirstate.write()

            self.removeundo(repo)
            repair.strip(self.ui, repo, rev, backup)
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            release(lock, wlock)
827 827
828 828 def isapplied(self, patch):
829 829 """returns (index, rev, patch)"""
830 830 for i in xrange(len(self.applied)):
831 831 a = self.applied[i]
832 832 if a.name == patch:
833 833 return (i, a.rev, a.name)
834 834 return None
835 835
    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve patch to a name from the series file, or abort.

        Also understands the symbolic names 'qtip' and 'qbase' (via
        partial matching) when patches are applied.  Returns None for
        a None patch argument.
        """
        patch = patch and str(patch)

        # resolve a substring (or 'qtip'/'qbase') to a unique series name
        def partial_name(s):
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if len(self.series) > 0 and len(self.applied) > 0:
                if s == 'qtip':
                    return self.series[self.series_end(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch is None:
            return None
        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except(ValueError, OverflowError):
                pass
            else:
                # numeric argument: an index into the series file
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

            if not strict:
                res = partial_name(patch)
                if res:
                    return res
                # name-<n>: n patches before the named one
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partial_name(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                # name+<n>: n patches after the named one
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partial_name(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)
906 906
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None, all=False):
        """Apply queued patches up to and including patch (qpush).

        With all, push everything; with mergeq, patches are merged
        from that queue instead of applied.  Returns 0 on success,
        1 on error conditions (guards, nothing to push, apply errors),
        None when the named patch is already at the top.  On an
        exception during apply, the working directory is cleaned up
        before re-raising.
        """
        wlock = repo.wlock()
        if repo.dirstate.parents()[0] not in repo.heads():
            self.ui.status(_("(working directory not at a head)\n"))

        if not self.series:
            self.ui.warn(_('no patches in series\n'))
            return 0

        try:
            patch = self.lookup(patch)
            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                info = self.isapplied(patch)
                if info:
                    if info[0] < len(self.applied) - 1:
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return
                pushable, reason = self.pushable(patch)
                if not pushable:
                    if reason:
                        reason = _('guarded by %r') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.series_end()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            if start > 0:
                self.check_toppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1
            s = self.series[start:end]
            all_files = {}
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files)
            except:
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.parents()[0]
                hg.revert(repo, node, None)
                unknown = repo.status(unknown=True)[4]
                # only remove unknown files that we know we touched or
                # created while patching
                for f in unknown:
                    if f in all_files:
                        util.unlink(repo.wjoin(f))
                self.ui.warn(_('done\n'))
                raise
            top = self.applied[-1].name
            if ret[0]:
                self.ui.write(_("errors during apply, please fix and "
                                "refresh %s\n") % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]
        finally:
            wlock.release()
994 994
    def pop(self, repo, patch=None, force=False, update=True, all=False):
        """Remove applied patches from the stack (qpop).

        Pops down to (but keeping) patch, or everything with all, or
        just the top patch by default.  When update is true (or forced
        on because a popped revision is a dirstate parent) the working
        directory is moved back via a simplified in-place update, then
        the revisions are stripped.
        """
        # write file f at revision rev into the working directory
        def getfile(f, rev, flags):
            t = repo.file(f).read(rev)
            repo.wwrite(f, t, flags)

        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if len(self.applied) == 0:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # force an update when a popped rev is a dirstate parent
                parents = repo.dirstate.parents()
                rr = [ bin(x.rev) for x in self.applied ]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # skip the update when no popped rev is a dirstate parent
                parents = [p.hex() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.rev in parents:
                        needupdate = True
                        break
                update = needupdate

            if not force and update:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            end = len(self.applied)
            rev = bin(self.applied[start].rev)
            if update:
                top = self.check_toppatch(repo)

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise util.Abort(_('trying to pop unknown node %s') % node)

            if heads != [bin(self.applied[-1].rev)]:
                raise util.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                changes = repo.changelog.read(qp)
                mmap = repo.manifest.read(changes[0])
                m, a, r, d = repo.status(qp, top)[:4]
                if d:
                    raise util.Abort(_("deletions found between repo revs"))
                for f in m:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in r:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in m + r:
                    repo.dirstate.normal(f)
                for f in a:
                    try:
                        os.unlink(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                    except: pass
                    repo.dirstate.forget(f)
                repo.dirstate.setparents(qp, nullid)
            del self.applied[start:end]
            self.strip(repo, rev, update=False, backup='strip')
            if len(self.applied):
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            wlock.release()
1096 1096
1097 1097 def diff(self, repo, pats, opts):
1098 1098 top = self.check_toppatch(repo)
1099 1099 if not top:
1100 1100 self.ui.write(_("no patches applied\n"))
1101 1101 return
1102 1102 qp = self.qparents(repo, top)
1103 1103 self._diffopts = patch.diffopts(self.ui, opts)
1104 1104 self.printdiff(repo, qp, files=pats, opts=opts)
1105 1105
1106 1106 def refresh(self, repo, pats=None, **opts):
1107 1107 if len(self.applied) == 0:
1108 1108 self.ui.write(_("no patches applied\n"))
1109 1109 return 1
1110 1110 msg = opts.get('msg', '').rstrip()
1111 1111 newuser = opts.get('user')
1112 1112 newdate = opts.get('date')
1113 1113 if newdate:
1114 1114 newdate = '%d %d' % util.parsedate(newdate)
1115 1115 wlock = repo.wlock()
1116 1116 try:
1117 1117 self.check_toppatch(repo)
1118 1118 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1119 1119 top = bin(top)
1120 1120 if repo.changelog.heads(top) != [top]:
1121 1121 raise util.Abort(_("cannot refresh a revision with children"))
1122 1122 cparents = repo.changelog.parents(top)
1123 1123 patchparent = self.qparents(repo, top)
1124 1124 ph = self.readheaders(patchfn)
1125 1125
1126 1126 patchf = self.opener(patchfn, 'r')
1127 1127
1128 1128 # if the patch was a git patch, refresh it as a git patch
1129 1129 for line in patchf:
1130 1130 if line.startswith('diff --git'):
1131 1131 self.diffopts().git = True
1132 1132 break
1133 1133
1134 1134 if msg:
1135 1135 ph.setmessage(msg)
1136 1136 if newuser:
1137 1137 ph.setuser(newuser)
1138 1138 if newdate:
1139 1139 ph.setdate(newdate)
1140 1140
1141 1141 # only commit new patch when write is complete
1142 1142 patchf = self.opener(patchfn, 'w', atomictemp=True)
1143 1143
1144 1144 patchf.seek(0)
1145 1145 patchf.truncate()
1146 1146
1147 1147 comments = str(ph)
1148 1148 if comments:
1149 1149 patchf.write(comments)
1150 1150
1151 1151 if opts.get('git'):
1152 1152 self.diffopts().git = True
1153 1153 tip = repo.changelog.tip()
1154 1154 if top == tip:
1155 1155 # if the top of our patch queue is also the tip, there is an
1156 1156 # optimization here. We update the dirstate in place and strip
1157 1157 # off the tip commit. Then just commit the current directory
1158 1158 # tree. We can also send repo.commit the list of files
1159 1159 # changed to speed up the diff
1160 1160 #
1161 1161 # in short mode, we only diff the files included in the
1162 1162 # patch already plus specified files
1163 1163 #
1164 1164 # this should really read:
1165 1165 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1166 1166 # but we do it backwards to take advantage of manifest/chlog
1167 1167 # caching against the next repo.status call
1168 1168 #
1169 1169 mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
1170 1170 changes = repo.changelog.read(tip)
1171 1171 man = repo.manifest.read(changes[0])
1172 1172 aaa = aa[:]
1173 1173 matchfn = cmdutil.match(repo, pats, opts)
1174 1174 if opts.get('short'):
1175 1175 # if amending a patch, we start with existing
1176 1176 # files plus specified files - unfiltered
1177 1177 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1178 1178 # filter with inc/exl options
1179 1179 matchfn = cmdutil.match(repo, opts=opts)
1180 1180 else:
1181 1181 match = cmdutil.matchall(repo)
1182 1182 m, a, r, d = repo.status(match=match)[:4]
1183 1183
1184 1184 # we might end up with files that were added between
1185 1185 # tip and the dirstate parent, but then changed in the
1186 1186 # local dirstate. in this case, we want them to only
1187 1187 # show up in the added section
1188 1188 for x in m:
1189 1189 if x not in aa:
1190 1190 mm.append(x)
1191 1191 # we might end up with files added by the local dirstate that
1192 1192 # were deleted by the patch. In this case, they should only
1193 1193 # show up in the changed section.
1194 1194 for x in a:
1195 1195 if x in dd:
1196 1196 del dd[dd.index(x)]
1197 1197 mm.append(x)
1198 1198 else:
1199 1199 aa.append(x)
1200 1200 # make sure any files deleted in the local dirstate
1201 1201 # are not in the add or change column of the patch
1202 1202 forget = []
1203 1203 for x in d + r:
1204 1204 if x in aa:
1205 1205 del aa[aa.index(x)]
1206 1206 forget.append(x)
1207 1207 continue
1208 1208 elif x in mm:
1209 1209 del mm[mm.index(x)]
1210 1210 dd.append(x)
1211 1211
1212 1212 m = list(set(mm))
1213 1213 r = list(set(dd))
1214 1214 a = list(set(aa))
1215 1215 c = [filter(matchfn, l) for l in (m, a, r)]
1216 1216 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1217 1217 chunks = patch.diff(repo, patchparent, match=match,
1218 1218 changes=c, opts=self.diffopts())
1219 1219 for chunk in chunks:
1220 1220 patchf.write(chunk)
1221 1221
1222 1222 try:
1223 1223 if self.diffopts().git:
1224 1224 copies = {}
1225 1225 for dst in a:
1226 1226 src = repo.dirstate.copied(dst)
1227 1227 # during qfold, the source file for copies may
1228 1228 # be removed. Treat this as a simple add.
1229 1229 if src is not None and src in repo.dirstate:
1230 1230 copies.setdefault(src, []).append(dst)
1231 1231 repo.dirstate.add(dst)
1232 1232 # remember the copies between patchparent and tip
1233 1233 for dst in aaa:
1234 1234 f = repo.file(dst)
1235 1235 src = f.renamed(man[dst])
1236 1236 if src:
1237 1237 copies.setdefault(src[0], []).extend(copies.get(dst, []))
1238 1238 if dst in a:
1239 1239 copies[src[0]].append(dst)
1240 1240 # we can't copy a file created by the patch itself
1241 1241 if dst in copies:
1242 1242 del copies[dst]
1243 1243 for src, dsts in copies.iteritems():
1244 1244 for dst in dsts:
1245 1245 repo.dirstate.copy(src, dst)
1246 1246 else:
1247 1247 for dst in a:
1248 1248 repo.dirstate.add(dst)
1249 1249 # Drop useless copy information
1250 1250 for f in list(repo.dirstate.copies()):
1251 1251 repo.dirstate.copy(None, f)
1252 1252 for f in r:
1253 1253 repo.dirstate.remove(f)
1254 1254 # if the patch excludes a modified file, mark that
1255 1255 # file with mtime=0 so status can see it.
1256 1256 mm = []
1257 1257 for i in xrange(len(m)-1, -1, -1):
1258 1258 if not matchfn(m[i]):
1259 1259 mm.append(m[i])
1260 1260 del m[i]
1261 1261 for f in m:
1262 1262 repo.dirstate.normal(f)
1263 1263 for f in mm:
1264 1264 repo.dirstate.normallookup(f)
1265 1265 for f in forget:
1266 1266 repo.dirstate.forget(f)
1267 1267
1268 1268 if not msg:
1269 1269 if not ph.message:
1270 1270 message = "[mq]: %s\n" % patchfn
1271 1271 else:
1272 1272 message = "\n".join(ph.message)
1273 1273 else:
1274 1274 message = msg
1275 1275
1276 1276 user = ph.user or changes[1]
1277 1277
1278 1278 # assumes strip can roll itself back if interrupted
1279 1279 repo.dirstate.setparents(*cparents)
1280 1280 self.applied.pop()
1281 1281 self.applied_dirty = 1
1282 1282 self.strip(repo, top, update=False,
1283 1283 backup='strip')
1284 1284 except:
1285 1285 repo.dirstate.invalidate()
1286 1286 raise
1287 1287
1288 1288 try:
1289 1289 # might be nice to attempt to roll back strip after this
1290 1290 patchf.rename()
1291 1291 n = repo.commit(match.files(), message, user, ph.date,
1292 1292 match=match, force=1)
1293 1293 self.applied.append(statusentry(hex(n), patchfn))
1294 1294 except:
1295 1295 ctx = repo[cparents[0]]
1296 1296 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1297 1297 self.save_dirty()
1298 1298 self.ui.warn(_('refresh interrupted while patch was popped! '
1299 1299 '(revert --all, qpush to recover)\n'))
1300 1300 raise
1301 1301 else:
1302 1302 self.printdiff(repo, patchparent, fp=patchf)
1303 1303 patchf.rename()
1304 1304 added = repo.status()[1]
1305 1305 for a in added:
1306 1306 f = repo.wjoin(a)
1307 1307 try:
1308 1308 os.unlink(f)
1309 1309 except OSError, e:
1310 1310 if e.errno != errno.ENOENT:
1311 1311 raise
1312 1312 try: os.removedirs(os.path.dirname(f))
1313 1313 except: pass
1314 1314 # forget the file copies in the dirstate
1315 1315 # push should readd the files later on
1316 1316 repo.dirstate.forget(a)
1317 1317 self.pop(repo, force=True)
1318 1318 self.push(repo, force=True)
1319 1319 finally:
1320 1320 wlock.release()
1321 1321 self.removeundo(repo)
1322 1322
    def init(self, repo, create=False):
        """Create the patch queue directory.

        With create=True, also create a versioned queue repository
        inside it and return that repository.
        """
        if not create and os.path.isdir(self.path):
            raise util.Abort(_("patch queue directory already exists"))
        try:
            os.mkdir(self.path)
        except OSError, inst:
            # a pre-existing directory is acceptable only when we are
            # about to create the versioned queue repository in it
            if inst.errno != errno.EEXIST or not create:
                raise
        if create:
            return self.qrepo(create=True)
1333 1333
1334 1334 def unapplied(self, repo, patch=None):
1335 1335 if patch and patch not in self.series:
1336 1336 raise util.Abort(_("patch %s is not in series file") % patch)
1337 1337 if not patch:
1338 1338 start = self.series_end()
1339 1339 else:
1340 1340 start = self.series.index(patch) + 1
1341 1341 unapplied = []
1342 1342 for i in xrange(start, len(self.series)):
1343 1343 pushable, reason = self.pushable(i)
1344 1344 if pushable:
1345 1345 unapplied.append((i, self.series[i]))
1346 1346 self.explain_pushable(i)
1347 1347 return unapplied
1348 1348
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print series entries, or with missing, patch files not in the series.

        start/length select a slice of the series.  status restricts
        output to patches in that state ('A'pplied, 'U'napplied,
        'G'uarded) unless verbose.  summary appends the first line of
        each patch message.
        """
        def displayname(patchname):
            # patch name, optionally followed by the first message line
            if summary:
                ph = self.readheaders(patchname)
                msg = ph.message
                msg = msg and ': ' + msg[0] or ': '
            else:
                msg = ''
            return '%s%s' % (patchname, msg)

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            for i in xrange(start, start+length):
                patch = self.series[i]
                if patch in applied:
                    stat = 'A'
                elif self.pushable(i)[0]:
                    stat = 'U'
                else:
                    stat = 'G'
                pfx = ''
                if self.ui.verbose:
                    # verbose mode shows every patch with index and state
                    pfx = '%d %s ' % (i, stat)
                elif status and status != stat:
                    continue
                self.ui.write('%s%s\n' % (pfx, displayname(patch)))
        else:
            # walk the patch directory for files that are neither in the
            # series nor one of mq's own control files
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path,
                                   self.guards_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                self.ui.write("%s%s\n" % (pfx, displayname(x)))
1392 1392
1393 1393 def issaveline(self, l):
1394 1394 if l.name == '.hg.patches.save.line':
1395 1395 return True
1396 1396
1397 1397 def qrepo(self, create=False):
1398 1398 if create or os.path.isdir(self.join(".hg")):
1399 1399 return hg.repository(self.ui, path=self.path, create=create)
1400 1400
1401 1401 def restore(self, repo, rev, delete=None, qupdate=None):
1402 1402 c = repo.changelog.read(rev)
1403 1403 desc = c[4].strip()
1404 1404 lines = desc.splitlines()
1405 1405 i = 0
1406 1406 datastart = None
1407 1407 series = []
1408 1408 applied = []
1409 1409 qpp = None
1410 for i in xrange(0, len(lines)):
1410 for i in xrange(len(lines)):
1411 1411 if lines[i] == 'Patch Data:':
1412 1412 datastart = i + 1
1413 1413 elif lines[i].startswith('Dirstate:'):
1414 1414 l = lines[i].rstrip()
1415 1415 l = l[10:].split(' ')
1416 1416 qpp = [ bin(x) for x in l ]
1417 1417 elif datastart != None:
1418 1418 l = lines[i].rstrip()
1419 1419 se = statusentry(l)
1420 1420 file_ = se.name
1421 1421 if se.rev:
1422 1422 applied.append(se)
1423 1423 else:
1424 1424 series.append(file_)
1425 1425 if datastart is None:
1426 1426 self.ui.warn(_("No saved patch data found\n"))
1427 1427 return 1
1428 1428 self.ui.warn(_("restoring status: %s\n") % lines[0])
1429 1429 self.full_series = series
1430 1430 self.applied = applied
1431 1431 self.parse_series()
1432 1432 self.series_dirty = 1
1433 1433 self.applied_dirty = 1
1434 1434 heads = repo.changelog.heads()
1435 1435 if delete:
1436 1436 if rev not in heads:
1437 1437 self.ui.warn(_("save entry has children, leaving it alone\n"))
1438 1438 else:
1439 1439 self.ui.warn(_("removing save entry %s\n") % short(rev))
1440 1440 pp = repo.dirstate.parents()
1441 1441 if rev in pp:
1442 1442 update = True
1443 1443 else:
1444 1444 update = False
1445 1445 self.strip(repo, rev, update=update, backup='strip')
1446 1446 if qpp:
1447 1447 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1448 1448 (short(qpp[0]), short(qpp[1])))
1449 1449 if qupdate:
1450 1450 self.ui.status(_("queue directory updating\n"))
1451 1451 r = self.qrepo()
1452 1452 if not r:
1453 1453 self.ui.warn(_("Unable to load queue repository\n"))
1454 1454 return 1
1455 1455 hg.clean(r, qpp[0])
1456 1456
    def save(self, repo, msg=None):
        """Checkpoint the current queue state in a commit (qsave).

        The commit message embeds the applied patch entries, the full
        series and the queue repository parents so that restore() can
        rebuild the queue later.  Returns 1 on error.
        """
        if len(self.applied) == 0:
            self.ui.warn(_("save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_("status is already saved\n"))
            return 1

        # series entries get a ':' prefix to distinguish them from
        # applied-patch status entries in the message body
        ar = [ ':' + x for x in self.full_series ]
        if not msg:
            msg = _("hg patches saved state")
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            # record the queue repository parents for qupdate on restore
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
                   "\n".join(ar) + '\n' or "")
        n = repo.commit(None, text, user=None, force=1)
        if not n:
            self.ui.warn(_("repo commit failed\n"))
            return 1
        self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
        self.applied_dirty = 1
        self.removeundo(repo)
1484 1484
1485 1485 def full_series_end(self):
1486 1486 if len(self.applied) > 0:
1487 1487 p = self.applied[-1].name
1488 1488 end = self.find_series(p)
1489 1489 if end is None:
1490 1490 return len(self.full_series)
1491 1491 return end + 1
1492 1492 return 0
1493 1493
    def series_end(self, all_patches=False):
        """If all_patches is False, return the index of the next pushable patch
        in the series, or the series length. If all_patches is True, return the
        index of the first patch past the last applied one.
        """
        end = 0
        def next(start):
            # advance past guarded (unpushable) patches, explaining each
            # skip, unless all_patches asked for the raw position
            if all_patches:
                return start
            i = start
            while i < len(self.series):
                p, reason = self.pushable(i)
                if p:
                    break
                self.explain_pushable(i)
                i += 1
            return i
        if len(self.applied) > 0:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                # topmost applied entry is not in the series file, so we
                # cannot map it to a position
                return 0
            return next(end + 1)
        return next(end)
1519 1519
1520 1520 def appliedname(self, index):
1521 1521 pname = self.applied[index].name
1522 1522 if not self.ui.verbose:
1523 1523 p = pname
1524 1524 else:
1525 1525 p = str(self.series.index(pname)) + " " + pname
1526 1526 return p
1527 1527
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patches from files, or place revisions rev under mq control.

        files may contain '-' to read a patch from stdin (patchname
        required).  existing registers files already in the patch
        directory; force overwrites existing patch files; git exports
        --rev imports in git diff format.  patchname names a single
        imported patch.
        """
        def checkseries(patchname):
            if patchname in self.series:
                raise util.Abort(_('patch %s is already in the series file')
                                 % patchname)
        def checkfile(patchname):
            if not force and os.path.exists(self.join(patchname)):
                raise util.Abort(_('patch "%s" already exists')
                                 % patchname)

        if rev:
            if files:
                raise util.Abort(_('option "-r" not valid when importing '
                                   'files'))
            # process revisions from the newest to the oldest
            rev = cmdutil.revrange(repo, rev)
            rev.sort(lambda x, y: cmp(y, x))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise util.Abort(_('option "-n" not valid when importing multiple '
                               'patches'))
        i = 0
        added = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = hex(repo.changelog.node(rev[0]))
                if base in [n.rev for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [bin(self.applied[-1].rev)]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(bin(self.applied[0].rev))
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            if git:
                self.diffopts().git = True

            for r in rev:
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != nullrev:
                    raise util.Abort(_('cannot import merge revision %d') % r)
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                self.check_reserved_name(patchname)
                checkseries(patchname)
                checkfile(patchname)
                # insert at the front: revisions are walked newest-first
                self.full_series.insert(0, patchname)

                patchf = self.opener(patchname, "w")
                patch.export(repo, [n], fp=patchf, opts=self.diffopts())
                patchf.close()

                se = statusentry(hex(n), patchname)
                self.applied.insert(0, se)

                added.append(patchname)
                patchname = None
            self.parse_series()
            self.applied_dirty = 1

        for filename in files:
            if existing:
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import from -'))
                if not patchname:
                    patchname = normname(filename)
                self.check_reserved_name(patchname)
                if not os.path.isfile(self.join(patchname)):
                    raise util.Abort(_("patch %s does not exist") % patchname)
            else:
                try:
                    if filename == '-':
                        if not patchname:
                            raise util.Abort(_('need --name to import a patch from -'))
                        text = sys.stdin.read()
                    else:
                        text = url.open(self.ui, filename).read()
                except (OSError, IOError):
                    raise util.Abort(_("unable to read %s") % filename)
                if not patchname:
                    patchname = normname(os.path.basename(filename))
                self.check_reserved_name(patchname)
                checkfile(patchname)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                # insert after the last applied patch
                index = self.full_series_end() + i
                self.full_series[index:index] = [patchname]
            self.parse_series()
            self.ui.warn(_("adding %s to series file\n") % patchname)
            i += 1
            added.append(patchname)
            patchname = None
        self.series_dirty = 1
        qrepo = self.qrepo()
        if qrepo:
            # track new patch files in the versioned queue repository
            qrepo.add(added)
1645 1645
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, unless they are arguments to the
    -r/--rev parameter. At least one patch or revision is required.

    With --rev, mq will stop managing the named revisions (converting
    them to regular mercurial changesets). The qfinish command should
    be used as an alternative for qdelete -r, as the latter option is
    deprecated.

    With -k/--keep, the patch files are preserved in the patch
    directory."""
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1663 1663
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if not patch:
        end = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1674 1674
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if not patch:
        start = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1685 1685
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.
    """
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'],
               existing=opts['existing'], force=opts['force'],
               rev=opts['rev'], git=opts['git'])
    mq.save_dirty()
    # with --push (and no --rev), push the freshly imported patch
    if opts.get('push') and not opts.get('rev'):
        return mq.push(repo, None)
    return 0
1722 1722
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if r:
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wopener('.hgignore', 'w')
            # ignore nested control files and mq's own status files
            for pat in ('^\\.hg\n', '^\\.mq\n', 'syntax: glob\n',
                        'status\n', 'guards\n'):
                fp.write(pat)
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r.add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0
1748 1748
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested mercurial repository, as
    would be created by qinit -c.
    '''
    def patchdir(repo):
        # URL of the nested patch repository for a given repository
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # for a remote destination, restrict the clone to the
                # revisions outside the applied-patch range
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # the destination should end up with no patches applied
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
1812 1812
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    qr = repo.mq.qrepo()
    if not qr:
        raise util.Abort('no queue repository')
    commands.commit(qr.ui, qr, *pats, **opts)
1819 1819
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1824 1824
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    if q.applied:
        t = q.series_end(True)
    else:
        t = 0
    if not t:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=t-1, length=1, status='A',
                     summary=opts.get('summary'))
1835 1835
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1844 1844
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    napplied = len(q.applied)
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    return q.qseries(repo, start=napplied-2, length=1, status='A',
                     summary=opts.get('summary'))
1857 1857
def setupheaderopts(ui, opts):
    """Fill in the user/date opts from --currentuser/--currentdate."""
    def setdefault(key, value):
        # only honour current<key> when no explicit value was given
        if not opts[key] and opts['current' + key]:
            opts[key] = value
    setdefault('user', ui.username())
    setdefault('date', "%d %d" % util.makedate())
1864 1864
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). It will refuse to run if there are any outstanding changes
    unless -f/--force is specified, in which case the patch will be
    initialized with them. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.
    """
    msg = cmdutil.logmessage(opts)
    def getmsg():
        return ui.edit(msg, ui.username())
    q = repo.mq
    # with --edit, pass a callable so the editor is only invoked once
    # q.new is ready to commit; the redundant unconditional assignment
    # of opts['msg'] before this branch has been dropped
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1901 1901
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        patch = q.applied[-1].name
        ph = q.readheaders(patch)
        # seed the editor with the current patch message
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
1932 1932
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use 'hg diff' if you only want to see the changes made since the
    last qrefresh, or 'hg export qtip' if you want to see changes made
    by the current patch without including changes made since the
    qrefresh.
    """
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1948 1948
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))
    q.check_localchanges(repo)

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # NOTE(review): despite the warning text, the duplicate is
            # still appended below — there is no continue here; confirm
            # whether that is intentional
            ui.warn(_('Skipping already folded patch %s') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect each folded patch's message for concatenation
            ph = q.readheaders(p)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # join the current header with the folded ones, '* * *' between
        ph = q.readheaders(parent)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
2011 2011
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    patch = q.lookup(patch)
    # pop if the target is already applied, otherwise push up to it
    if q.isapplied(patch):
        move = q.pop
    else:
        move = q.push
    ret = move(repo, patch, force=opts['force'])
    q.save_dirty()
    return ret
2022 2022
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.
    NOTE: Specifying negative guards now requires '--'.

    To set guards on another patch:
      hg qguard -- other.patch +2.6.17 -stable
    '''
    def status(idx):
        # print "name: guard guard ..." for series entry idx
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # no patch name given (first arg looks like a guard): use the top patch
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # set (or with --none, clear) the guards for the patch
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        status(q.series.index(q.lookup(patch)))
2067 2067
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch"""
    q = repo.mq
    if not patch:
        if not q.applied:
            ui.write('no patches applied\n')
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)
    ph = q.readheaders(patch)
    ui.write('\n'.join(ph.message) + '\n')
2082 2082
def lastsavename(path):
    """Return (filename, index) of the highest-numbered save file.

    Save files live in the same directory as *path* and are named
    "<base>.<N>" for a decimal N.  Returns (None, None) when no save
    file exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # escape the base name and anchor the pattern: the original
    # "%s.([0-9]+)" let regex metacharacters in base (and the bare '.')
    # match arbitrary characters, and the unanchored match accepted
    # trailing garbage after the index
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2099 2099
def savename(path):
    """Return the next unused save file name for *path*."""
    last, index = lastsavename(path)
    if last is None:
        index = 0
    return path + ".%d" % (index + 1)
2106 2106
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is applied, all local changes in patched files
    will be lost.
    """
    q = repo.mq
    mergeq = None

    if opts['merge']:
        # -m/--merge: rebase against a saved patch queue, either the
        # one named with -n or the most recent save
        if opts['name']:
            newpath = repo.join(opts['name'])
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
                 mergeq=mergeq, all=opts.get('all'))
    return ret
2129 2129
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch
    name, keeps popping off patches until the named patch is at the
    top of the stack.
    """
    name = opts['name']
    if name:
        # popping from a named (saved) queue never updates the working dir
        q = queue(ui, repo.join(""), repo.join(name))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
2148 2148
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    # one-argument form: rename the current (topmost) patch to `patch`
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into a directory keeps the original basename
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('renaming %s to %s\n' % (patch, name))
    # rewrite the series entry, preserving any "#guard" annotations
    i = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # if the patch is applied, update the status entry to the new name
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    # mirror the rename in the versioned patch queue repo, if any
    r = q.qrepo()
    if r:
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                # never committed: just swap the pending add
                r.dirstate.forget(patch)
                r.dirstate.add(name)
            else:
                # record as a copy + removal so history is preserved
                if r.dirstate[name] == 'r':
                    r.undelete([name])
                r.copy(patch, name)
                r.remove([patch], False)
        finally:
            wlock.release()

    q.save_dirty()
2208 2208
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision"""
    q = repo.mq
    # resolve the revision first, then hand everything to the queue
    q.restore(repo, repo.lookup(rev), delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
2217 2217
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        # -c: snapshot the patch directory, either to an explicit name
        # (-n, refusing to clobber without -f) or the next save name
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # the status file may legitimately be absent; a bare except
            # here would also swallow KeyboardInterrupt/SystemExit
            pass
    return 0
2247 2247
def strip(ui, repo, rev, **opts):
    """strip a revision and all its descendants from the repository

    If one of the working directory's parent revisions is stripped, the
    working directory will be updated to the parent of the stripped
    revision.
    """
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'

    rev = repo.lookup(rev)
    p = repo.dirstate.parents()
    cl = repo.changelog
    # update the working directory only when rev is an ancestor of one
    # of its parents (i.e. a dirstate parent is actually being stripped)
    update = True
    if p[0] == nullid:
        update = False
    elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
        update = False
    elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
        update = False

    repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
    return 0
2274 2274
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example:

        qguard foo.patch -stable    (negative guard)
        qguard bar.patch +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.'''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # changing the active guard set: remember the before state so we
        # can report how many patches became (un)pushable
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s: tally how many series entries use each guard
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # sort by guard name, ignoring the leading '+'/'-' sign
        guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # no args: just print the currently active guards
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # --reapply implies --pop; remember the current top so we can push
    # back to it afterwards
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        # pop down to just below the first applied patch that is now guarded
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i-1)
                break
        if popped:
            try:
                if reapply:
                    ui.status(_('reapplying unguarded patches\n'))
                    q.push(repo, reapply)
            finally:
                q.save_dirty()
2377 2377
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.
    """
    if opts['applied']:
        # -a: prepend the full applied range to any explicit revisions
        revrange = ('qbase:qtip',) + revrange
    elif not revrange:
        raise util.Abort(_('no revisions specified'))

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    q.finish(repo, cmdutil.revrange(repo, revrange))
    q.save_dirty()
    return 0
2408 2408
def reposetup(ui, repo):
    # subclass the repository on the fly so mq can veto operations that
    # would corrupt the patch stack and expose patches as tags
    class mqrepo(repo.__class__):
        @util.propertycache
        def mq(self):
            # lazily construct the queue on first access
            return queue(self.ui, self.join(""))

        def abort_if_wdir_patched(self, errmsg, force=False):
            # refuse the operation if the working dir parent is an
            # applied mq patch (unless forced)
            if self.mq.applied and not force:
                parent = hex(self.dirstate.parents()[0])
                if parent in [s.rev for s in self.mq.applied]:
                    raise util.Abort(errmsg)

        def commit(self, *args, **opts):
            # `force` may arrive positionally (6th arg) or by keyword
            if len(args) >= 6:
                force = args[5]
            else:
                force = opts.get('force')
            self.abort_if_wdir_patched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(*args, **opts)

        def push(self, remote, force=False, revs=None):
            # pushing applied patches would publish mutable history
            if self.mq.applied and not force and not revs:
                raise util.Abort(_('source has mq patches applied'))
            return super(mqrepo, self).push(remote, force, revs)

        def tags(self):
            if self.tagscache:
                return self.tagscache

            tagscache = super(mqrepo, self).tags()

            q = self.mq
            if not q.applied:
                return tagscache

            # expose each applied patch as a tag, plus the synthetic
            # qtip/qbase/qparent tags
            mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]

            if mqtags[-1][0] not in self.changelog.nodemap:
                # stale status file: don't add bogus tags
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return tagscache

            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            for patch in mqtags:
                if patch[1] in tagscache:
                    # real tags win over patch names
                    self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
                                 % patch[1])
                else:
                    tagscache[patch[1]] = patch[0]

            return tagscache

        def _branchtags(self, partial, lrev):
            # keep mq revisions out of the persistent branch cache
            q = self.mq
            if not q.applied:
                return super(mqrepo, self)._branchtags(partial, lrev)

            cl = self.changelog
            qbasenode = bin(q.applied[0].rev)
            if qbasenode not in cl.nodemap:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(qbasenode))
                return super(mqrepo, self)._branchtags(partial, lrev)

            qbase = cl.rev(qbasenode)
            start = lrev + 1
            if start < qbase:
                # update the cache (excluding the patches) and save it
                self._updatebranchcache(partial, lrev+1, qbase)
                self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
                start = qbase
            # if start = qbase, the cache is as updated as it should be.
            # if start > qbase, the cache includes (part of) the patches.
            # we might as well use it, but we won't save it.

            # update the cache up to the tip
            self._updatebranchcache(partial, start, len(cl))

            return partial

    if repo.local():
        repo.__class__ = mqrepo
2496 2496
def mqimport(orig, ui, repo, *args, **kwargs):
    """Wrap the import command: refuse importing over an applied patch."""
    # only mq-enabled repos grow this method (see reposetup)
    guard = getattr(repo, 'abort_if_wdir_patched', None)
    if guard is not None:
        guard(_('cannot import over an applied patch'),
              kwargs.get('force'))
    return orig(ui, repo, *args, **kwargs)
2502 2502
def uisetup(ui):
    # hook the applied-patch protection into the core import command
    extensions.wrapcommand(commands.table, 'import', mqimport)
2505 2505
# option shared by every series-listing command (-s/--summary)
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]

# command table: name (with "|"-separated aliases) -> (function, options,
# synopsis); a leading "^" marks commands listed in the short help
cmdtable = {
    "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
    "qclone":
        (clone,
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None, _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '', _('location of source patch repository')),
          ] + commands.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]')),
    "qcommit|qci":
        (commit,
         commands.table["^commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...')),
    "^qdiff":
        (diff,
         commands.diffopts + commands.diffopts2 + commands.walkopts,
         _('hg qdiff [OPTION]... [FILE]...')),
    "qdelete|qremove|qrm":
        (delete,
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [], _('stop managing a revision'))],
         _('hg qdelete [-k] [-r REV]... [PATCH]...')),
    'qfold':
        (fold,
         [('e', 'edit', None, _('edit patch header')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + commands.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
    'qgoto':
        (goto,
         [('f', 'force', None, _('overwrite any local changes'))],
         _('hg qgoto [OPTION]... PATCH')),
    'qguard':
        (guard,
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
    'qheader': (header, [], _('hg qheader [PATCH]')),
    "^qimport":
        (qimport,
         [('e', 'existing', None, _('import file in patch directory')),
          ('n', 'name', '', _('patch file name')),
          ('f', 'force', None, _('overwrite existing files')),
          ('r', 'rev', [], _('place existing revisions under mq control')),
          ('g', 'git', None, _('use git extended diff format')),
          ('P', 'push', None, _('qpush after importing'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
    "^qinit":
        (init,
         [('c', 'create-repo', None, _('create queue repository'))],
         _('hg qinit [-c]')),
    "qnew":
        (new,
         [('e', 'edit', None, _('edit commit message')),
          ('f', 'force', None, _('import uncommitted changes into patch')),
          ('g', 'git', None, _('use git extended diff format')),
          ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
          ('u', 'user', '', _('add "From: <given user>" to patch')),
          ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
          ('d', 'date', '', _('add "Date: <given date>" to patch'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
    "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
    "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
    "^qpop":
        (pop,
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '', _('queue name to pop')),
          ('f', 'force', None, _('forget any local changes'))],
         _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
    "^qpush":
        (push,
         [('f', 'force', None, _('apply if the patch has rejects')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue')),
          ('n', 'name', '', _('merge queue name'))],
         _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
    "^qrefresh":
        (refresh,
         [('e', 'edit', None, _('edit commit message')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None, _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
          ('u', 'user', '', _('add/update "From: <given user>" in patch')),
          ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
          ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
    'qrename|qmv':
        (rename, [], _('hg qrename PATCH1 [PATCH2]')),
    "qrestore":
        (restore,
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working directory'))],
         _('hg qrestore [-d] [-u] REV')),
    "qsave":
        (save,
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '', _('copy directory name')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + commands.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
    "qselect":
        (select,
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...')),
    "qseries":
        (series,
         [('m', 'missing', None, _('print patches not in series')),
          ] + seriesopts,
         _('hg qseries [-ms]')),
    "^strip":
        (strip,
         [('f', 'force', None, _('force removal with local changes')),
          ('b', 'backup', None, _('bundle unrelated changesets')),
          ('n', 'nobackup', None, _('no backups'))],
         _('hg strip [-f] [-b] [-n] REV')),
    "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
    "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
    "qfinish":
        (finish,
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV...]')),
}
@@ -1,145 +1,145
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2, incorporated herein by reference.
8 8
9 9 import changegroup
10 10 from node import nullrev, short
11 11 from i18n import _
12 12 import os
13 13
def _bundle(repo, bases, heads, node, suffix, extranodes=None):
    """create a bundle with the specified revisions as a backup"""
    bundle = repo.changegroupsubset(bases, heads, 'strip', extranodes)
    backupdir = repo.join("strip-backup")
    if not os.path.isdir(backupdir):
        os.mkdir(backupdir)
    # backups are named <short-node>-<suffix> inside .hg/strip-backup
    fname = os.path.join(backupdir, "%s-%s" % (short(node), suffix))
    repo.ui.warn(_("saving bundle to %s\n") % fname)
    return changegroup.writebundle(bundle, fname, "HG10BZ")
23 23
def _collectfiles(repo, striprev):
    """find out the filelogs affected by the strip"""
    touched = set()
    # every revision from striprev up to tip is about to be removed
    for rev in xrange(striprev, len(repo)):
        touched.update(repo[rev].files())
    return sorted(touched)
32 32
def _collectextranodes(repo, files, link):
    """return the nodes that have to be saved before the strip"""
    def collectone(revlog):
        # collect (node, linknode) pairs for revisions of this revlog
        # that sort after the truncation point but link before it
        extra = []
        startrev = count = len(revlog)
        # find the truncation point of the revlog
        # (note: the extracted source carried both the pre- and
        # post-refactor form of this loop header; xrange(count) is the
        # committed version)
        for i in xrange(count):
            lrev = revlog.linkrev(i)
            if lrev >= link:
                startrev = i + 1
                break

        # see if any revision after that point has a linkrev less than link
        # (we have to manually save these guys)
        for i in xrange(startrev, count):
            node = revlog.node(i)
            lrev = revlog.linkrev(i)
            if lrev < link:
                # cl is the changelog bound in the enclosing scope below
                extra.append((node, cl.node(lrev)))

        return extra

    extranodes = {}
    cl = repo.changelog
    extra = collectone(repo.manifest)
    if extra:
        # key 1 conventionally stands for the manifest here
        extranodes[1] = extra
    for fname in files:
        f = repo.file(fname)
        extra = collectone(f)
        if extra:
            extranodes[fname] = extra

    return extranodes
67 67
def strip(ui, repo, node, backup="all"):
    # Remove node and all its descendants from the repository.
    # backup="all" bundles everything stripped; "strip" keeps the temp
    # bundle of unrelated changesets; anything else deletes it afterwards.
    cl = repo.changelog
    # TODO delete the undo files, and handle undo of merge sets
    striprev = cl.rev(node)

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set((striprev,))
    saveheads = set()
    savebases = []
    for r in xrange(striprev + 1, len(cl)):
        parents = cl.parentrevs(r)
        if parents[0] in tostrip or parents[1] in tostrip:
            # r is a descendant of striprev
            tostrip.add(r)
            # if this is a merge and one of the parents does not descend
            # from striprev, mark that parent as a savehead.
            if parents[1] != nullrev:
                for p in parents:
                    if p not in tostrip and p > striprev:
                        saveheads.add(p)
        else:
            # if no parents of this revision will be stripped, mark it as
            # a savebase
            if parents[0] < striprev and parents[1] < striprev:
                savebases.append(cl.node(r))

            saveheads.difference_update(parents)
            saveheads.add(r)

    saveheads = [cl.node(r) for r in saveheads]
    files = _collectfiles(repo, striprev)

    extranodes = _collectextranodes(repo, files, striprev)

    # create a changegroup for all the branches we need to keep
    if backup == "all":
        _bundle(repo, [node], cl.heads(), node, 'backup')
    if saveheads or extranodes:
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            extranodes)

    fs = [repo.file(name) for name in files]
    mfst = repo.manifest

    tr = repo.transaction()
    offset = len(tr.entries)

    # strip the changelog, manifest, and all affected filelogs as one group
    tr.startgroup()
    cl.strip(striprev, tr)
    mfst.strip(striprev, tr)
    for f in fs:
        f.strip(striprev, tr)
    tr.endgroup()

    try:
        # truncate the store files back to their pre-strip sizes
        for i in xrange(offset, len(tr.entries)):
            file, troffset, ignore = tr.entries[i]
            repo.sopener(file, 'a').truncate(troffset)
        tr.close()
    except:
        # abort the transaction before re-raising so the store stays usable
        tr.abort()
        raise

    if saveheads or extranodes:
        # re-add the changesets that were not descendants of striprev
        ui.status(_("adding branch\n"))
        f = open(chgrpfile, "rb")
        gen = changegroup.readbundle(f, chgrpfile)
        repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
        f.close()
        if backup != "strip":
            os.unlink(chgrpfile)
145 145
@@ -1,1390 +1,1390
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2, incorporated herein by reference.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 # import stuff from node for others to import from revlog
15 15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
16 16 from i18n import _
17 17 import changegroup, ancestor, mdiff, parsers, error, util
18 18 import struct, zlib, errno
19 19
# local aliases to avoid repeated attribute lookups in hot paths
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS

# amount of index data read ahead when opening a revlog (1 MB)
_prereadsize = 1048576

# re-export the error types under their historical revlog names
RevlogError = error.RevlogError
LookupError = error.LookupError
38 38
def getoffset(q):
    """Return the data offset packed in the high bits of index entry q."""
    offset = q >> 16
    return int(offset)
41 41
def gettype(q):
    """Return the type flags packed in the low 16 bits of index entry q."""
    return int(0xFFFF & q)
44 44
def offset_type(offset, type):
    # pack (offset, type) into one value: offset in the high bits, 16-bit
    # type flags in the low bits (inverse of getoffset/gettype)
    return long(long(offset) << 16 | type)
47 47
# sha1 state pre-seeded with the null node id; hash() copies it, which is
# cheaper than building a fresh sha1 for the common single-parent case
nullhash = _sha(nullid)
49 49
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent node is null, p2 is null
    if p2 == nullid:
        # single-parent case: reuse the precomputed null-node sha1 state
        s = nullhash.copy()
        s.update(p1)
    else:
        # hash both parents in sorted order so the digest is independent
        # of parent ordering
        pmin, pmax = sorted([p1, p2])
        s = _sha(pmin)
        s.update(pmax)
    s.update(text)
    return s.digest()
70 70
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text:
        return ("", text)
    size = len(text)
    packed = None
    if size < 44:
        # too short for compression to ever pay off
        pass
    elif size > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so lets do this in pieces
        z = zlib.compressobj()
        pieces = []
        pos = 0
        while pos < size:
            end = pos + 2**20
            pieces.append(z.compress(text[pos:end]))
            pos = end
        pieces.append(z.flush())
        if sum(map(len, pieces)) < size:
            packed = "".join(pieces)
    else:
        packed = _compress(text)
    if packed is None or len(packed) > size:
        # compression did not help; store literally, prefixing 'u'
        # unless the text already starts with the NUL marker
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", packed)
99 99
def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    marker = bin[0]
    if marker == '\0':
        # NUL-prefixed chunks are stored verbatim
        return bin
    if marker == 'x':
        # zlib stream (zlib output always begins with 'x')
        return _decompress(bin)
    if marker == 'u':
        # stored uncompressed behind a one-byte marker
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % marker)
112 112
113 113 class lazyparser(object):
114 114 """
115 115 this class avoids the need to parse the entirety of large indices
116 116 """
117 117
118 118 # lazyparser is not safe to use on windows if win32 extensions not
119 119 # available. it keeps file handle open, which make it not possible
120 120 # to break hardlinks on local cloned repos.
121 121
122 122 def __init__(self, dataf, size):
123 123 self.dataf = dataf
124 124 self.s = struct.calcsize(indexformatng)
125 125 self.datasize = size
126 126 self.l = size/self.s
127 127 self.index = [None] * self.l
128 128 self.map = {nullid: nullrev}
129 129 self.allmap = 0
130 130 self.all = 0
131 131 self.mapfind_count = 0
132 132
133 133 def loadmap(self):
134 134 """
135 135 during a commit, we need to make sure the rev being added is
136 136 not a duplicate. This requires loading the entire index,
137 137 which is fairly slow. loadmap can load up just the node map,
138 138 which takes much less time.
139 139 """
140 140 if self.allmap:
141 141 return
142 142 end = self.datasize
143 143 self.allmap = 1
144 144 cur = 0
145 145 count = 0
146 146 blocksize = self.s * 256
147 147 self.dataf.seek(0)
148 148 while cur < end:
149 149 data = self.dataf.read(blocksize)
150 150 off = 0
151 151 for x in xrange(256):
152 152 n = data[off + ngshaoffset:off + ngshaoffset + 20]
153 153 self.map[n] = count
154 154 count += 1
155 155 if count >= self.l:
156 156 break
157 157 off += self.s
158 158 cur += blocksize
159 159
160 160 def loadblock(self, blockstart, blocksize, data=None):
161 161 if self.all:
162 162 return
163 163 if data is None:
164 164 self.dataf.seek(blockstart)
165 165 if blockstart + blocksize > self.datasize:
166 166 # the revlog may have grown since we've started running,
167 167 # but we don't have space in self.index for more entries.
168 168 # limit blocksize so that we don't get too much data.
169 169 blocksize = max(self.datasize - blockstart, 0)
170 170 data = self.dataf.read(blocksize)
171 171 lend = len(data) / self.s
172 172 i = blockstart / self.s
173 173 off = 0
174 174 # lazyindex supports __delitem__
175 175 if lend > len(self.index) - i:
176 176 lend = len(self.index) - i
177 177 for x in xrange(lend):
178 178 if self.index[i + x] is None:
179 179 b = data[off : off + self.s]
180 180 self.index[i + x] = b
181 181 n = b[ngshaoffset:ngshaoffset + 20]
182 182 self.map[n] = i + x
183 183 off += self.s
184 184
185 185 def findnode(self, node):
186 186 """search backwards through the index file for a specific node"""
187 187 if self.allmap:
188 188 return None
189 189
190 190 # hg log will cause many many searches for the manifest
191 191 # nodes. After we get called a few times, just load the whole
192 192 # thing.
193 193 if self.mapfind_count > 8:
194 194 self.loadmap()
195 195 if node in self.map:
196 196 return node
197 197 return None
198 198 self.mapfind_count += 1
199 199 last = self.l - 1
200 200 while self.index[last] != None:
201 201 if last == 0:
202 202 self.all = 1
203 203 self.allmap = 1
204 204 return None
205 205 last -= 1
206 206 end = (last + 1) * self.s
207 207 blocksize = self.s * 256
208 208 while end >= 0:
209 209 start = max(end - blocksize, 0)
210 210 self.dataf.seek(start)
211 211 data = self.dataf.read(end - start)
212 212 findend = end - start
213 213 while True:
214 214 # we're searching backwards, so we have to make sure
215 215 # we don't find a changeset where this node is a parent
216 216 off = data.find(node, 0, findend)
217 217 findend = off
218 218 if off >= 0:
219 219 i = off / self.s
220 220 off = i * self.s
221 221 n = data[off + ngshaoffset:off + ngshaoffset + 20]
222 222 if n == node:
223 223 self.map[n] = i + start / self.s
224 224 return node
225 225 else:
226 226 break
227 227 end -= blocksize
228 228 return None
229 229
    def loadindex(self, i=None, end=None):
        """Load index entries from disk.

        With no arguments, loads the entire index.  With i alone, loads
        the 1024-entry-aligned block containing entry i.  With i and end,
        loads entries [i, end).
        """
        if self.all:
            return
        # NOTE: 'all' shadows the builtin; kept for byte-compatibility
        all = False
        if i is None:
            blockstart = 0
            # read in ~64KB chunks, rounded to a whole number of entries
            blocksize = (65536 / self.s) * self.s
            end = self.datasize
            all = True
        else:
            if end:
                blockstart = i * self.s
                end = end * self.s
                blocksize = end - blockstart
            else:
                # round i down to a 1024-entry boundary and load one block
                blockstart = (i & ~1023) * self.s
                blocksize = self.s * 1024
                end = blockstart + blocksize
        while blockstart < end:
            self.loadblock(blockstart, blocksize)
            blockstart += blocksize
        if all:
            self.all = True
253 253
class lazyindex(object):
    """a lazy version of the index array

    Entries are kept as packed strings (or None) inside the parser and
    only unpacked into tuples when read through __getitem__.
    """
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        # normalize a negative position, then ask the parser to fill
        # the corresponding slot from disk
        if pos < 0:
            pos += len(self.p.index)
        self.p.loadindex(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        raw = self.p.index[pos]
        if not raw:
            raw = self.load(pos)
        return _unpack(indexformatng, raw)
    def __setitem__(self, pos, item):
        self.p.index[pos] = _pack(indexformatng, *item)
    def __delitem__(self, pos):
        del self.p.index[pos]
    def insert(self, pos, e):
        self.p.index.insert(pos, _pack(indexformatng, *e))
    def append(self, e):
        self.p.index.append(_pack(indexformatng, *e))
275 275
class lazymap(object):
    """a lazy version of the node map

    Maps nodeids to revision numbers, consulting the lazy parser (and
    ultimately the index file) only when a key is not cached yet.
    """
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        # a successful findnode caches the hit in self.p.map as a
        # side effect; a miss means the key really is unknown
        if self.p.findnode(key) is None:
            raise KeyError(key)
    def __contains__(self, key):
        if key in self.p.map:
            return True
        # not cached: membership can only be decided with the full map
        self.p.loadmap()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for rev in xrange(self.p.l):
            entry = self.p.index[rev]
            if not entry:
                self.p.loadindex(rev)
                entry = self.p.index[rev]
            if isinstance(entry, str):
                entry = _unpack(indexformatng, entry)
            yield entry[7]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]
312 312
# v0 index record: offset, size, base, linkrev (4 signed 32-bit ints)
# followed by three 20-byte nodeids (parent 1, parent 2, self)
indexformatv0 = ">4l20s20s20s"
# byte offset of the record's own nodeid within a packed v0 record
v0shaoffset = 56
315 315
class revlogoldio(object):
    """Read/write helper for version 0 (pre-revlogng) index files."""
    def __init__(self):
        # byte size of one packed v0 index record
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, fp, data, inline):
        """Parse v0 index bytes into (index, nodemap, None).

        Entries are converted to the revlogv1 tuple layout so the rest
        of the code only has to deal with one format.
        """
        recsize = self.size
        index = []
        nodemap = {nullid: nullrev}
        count = 0
        pos = 0
        if len(data) == _prereadsize:
            data += fp.read() # read the rest
        total = len(data)
        while pos + recsize <= total:
            fields = _unpack(indexformatv0, data[pos:pos + recsize])
            pos += recsize
            # transform to revlogv1 format
            converted = (offset_type(fields[0], 0), fields[1], -1,
                         fields[2], fields[3],
                         nodemap.get(fields[4], nullrev),
                         nodemap.get(fields[5], nullrev), fields[6])
            index.append(converted)
            nodemap[fields[6]] = count
            count += 1

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        # map the v1 tuple back to the v0 field order; parent revs are
        # converted to nodeids via the supplied node() callback
        fields = (getoffset(entry[0]), entry[1], entry[3], entry[4],
                  node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *fields)
345 345
# index ng:
#  6 bytes offset
#  2 bytes flags
#  4 bytes compressed length
#  4 bytes uncompressed length
#  4 bytes: base rev
#  4 bytes link rev
#  4 bytes parent 1 rev
#  4 bytes parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
# byte offset of the nodeid within a packed ng record
ngshaoffset = 32
# format of the version header stored in the first 4 bytes of the index
versionformat = ">I"
359 359
class revlogio(object):
    """Read/write helper for revlogng (version 1) index files."""
    def __init__(self):
        # byte size of one packed ng index record
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, fp, data, inline):
        """Parse index content into (index, nodemap, chunkcache).

        If only the pre-read prefix of a big, non-inline index is
        available and the platform supports keeping the file open
        (hardlinks are safe), fall back to the lazy parser instead of
        reading the whole file up front.
        """
        size = len(data)
        if size == _prereadsize:
            if util.openhardlinks() and not inline:
                try:
                    size = util.fstat(fp).st_size
                except AttributeError:
                    size = 0
                # big index, let's parse it on demand
                parser = lazyparser(fp, size)
                index = lazyindex(parser)
                nodemap = lazymap(parser)
                # strip the version header flags out of entry 0
                # NOTE: 'type' shadows the builtin; kept byte-identical
                e = list(index[0])
                type = gettype(e[0])
                e[0] = offset_type(0, type)
                index[0] = e
                return index, nodemap, None
            else:
                data += fp.read()

        # call the C implementation to parse the index data
        index, nodemap, cache = parsers.parse_index(data, inline)
        return index, nodemap, cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            # entry 0 doubles as the file header: splice the version
            # number into its first 4 bytes
            p = _pack(versionformat, version) + p[4:]
        return p
393 393
394 394 class revlog(object):
395 395 """
396 396 the underlying revision storage object
397 397
398 398 A revlog consists of two parts, an index and the revision data.
399 399
400 400 The index is a file with a fixed record size containing
401 401 information on each revision, including its nodeid (hash), the
402 402 nodeids of its parents, the position and offset of its data within
403 403 the data file, and the revision it's based on. Finally, each entry
404 404 contains a linkrev entry that can serve as a pointer to external
405 405 data.
406 406
407 407 The revision data itself is a linear collection of data chunks.
408 408 Each chunk represents a revision and is usually represented as a
409 409 delta against the previous chunk. To bound lookup time, runs of
410 410 deltas are limited to about 2 times the length of the original
411 411 version data. This makes retrieval of a version proportional to
412 412 its size, or O(1) relative to the number of revisions.
413 413
414 414 Both pieces of the revlog are written to in an append-only
415 415 fashion, which means we never need to rewrite a file to insert or
416 416 remove data, and can use some simple techniques to avoid the need
417 417 for locking while reading.
418 418 """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        # data file shares the index name with a .d extension ("x.i" -> "x.d")
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        # (node, rev, text) of the most recently reconstructed revision
        self._cache = None
        # (offset, data) cache of raw file content
        self._chunkcache = (0, '')
        self.nodemap = {nullid: nullrev}
        self.index = []

        v = REVLOG_DEFAULT_VERSION
        if hasattr(opener, "defversion"):
            v = opener.defversion
        if v & REVLOGNG:
            v |= REVLOGNGINLINEDATA

        # read a prefix of the index; enough to decide on lazy parsing
        i = ''
        try:
            f = self.opener(self.indexfile)
            i = f.read(_prereadsize)
            if len(i) > 0:
                v = struct.unpack(versionformat, i[:4])[0]
        except IOError, inst:
            # a missing index just means an empty revlog
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        if i:
            try:
                d = self._io.parseindex(f, i, self._inline)
            except (ValueError, IndexError), e:
                raise RevlogError(_("index %s is corrupted") % (self.indexfile))
            self.index, self.nodemap, self._chunkcache = d
            if not self._chunkcache:
                self._chunkcache = (0, '')

        # add the magic null revision at -1 (if it hasn't been done already)
        if (self.index == [] or isinstance(self.index, lazyindex) or
            self.index[-1][7] != nullid) :
            self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
480 480
481 481 def _loadindex(self, start, end):
482 482 """load a block of indexes all at once from the lazy parser"""
483 483 if isinstance(self.index, lazyindex):
484 484 self.index.p.loadindex(start, end)
485 485
486 486 def _loadindexmap(self):
487 487 """loads both the map and the index from the lazy parser"""
488 488 if isinstance(self.index, lazyindex):
489 489 p = self.index.p
490 490 p.loadindex()
491 491 self.nodemap = p.map
492 492
493 493 def _loadmap(self):
494 494 """loads the map from the lazy parser"""
495 495 if isinstance(self.nodemap, lazymap):
496 496 self.nodemap.p.loadmap()
497 497 self.nodemap = self.nodemap.p.map
498 498
499 499 def tip(self):
500 500 return self.node(len(self.index) - 2)
501 501 def __len__(self):
502 502 return len(self.index) - 1
503 503 def __iter__(self):
504 504 for i in xrange(len(self)):
505 505 yield i
    def rev(self, node):
        """Map a nodeid to its revision number.

        nodemap may be a plain dict or a lazymap; both guarantee only
        __getitem__/KeyError, hence the EAFP lookup.  Raises LookupError
        for unknown nodes.
        """
        try:
            return self.nodemap[node]
        except KeyError:
            raise LookupError(node, self.indexfile, _('no node'))
511 511 def node(self, rev):
512 512 return self.index[rev][7]
513 513 def linkrev(self, rev):
514 514 return self.index[rev][4]
515 515 def parents(self, node):
516 516 i = self.index
517 517 d = i[self.rev(node)]
518 518 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
519 519 def parentrevs(self, rev):
520 520 return self.index[rev][5:7]
521 521 def start(self, rev):
522 522 return int(self.index[rev][0] >> 16)
523 523 def end(self, rev):
524 524 return self.start(rev) + self.length(rev)
525 525 def length(self, rev):
526 526 return self.index[rev][1]
527 527 def base(self, rev):
528 528 return self.index[rev][3]
529 529
    def size(self, rev):
        """return the length of the uncompressed text for a given revision"""
        # the index stores the uncompressed length; a negative value
        # (v0 indexes) means "unknown"
        l = self.index[rev][2]
        if l >= 0:
            return l

        # unknown: reconstruct the full text and measure it
        t = self.revision(self.node(rev))
        return len(t)
538 538
    # alternate implementation, The advantage to this code is it
    # will be faster for a single revision. But, the results are not
    # cached, so finding the size of every revision will be slower.
    #
    # NOTE: the string below is dead code kept for reference only --
    # it is a bare expression statement and is never executed.
    """
    if self.cache and self.cache[1] == rev:
        return len(self.cache[2])

    base = self.base(rev)
    if self.cache and self.cache[1] >= base and self.cache[1] < rev:
        base = self.cache[1]
        text = self.cache[2]
    else:
        text = self.revision(self.node(base))

    l = len(text)
    for x in xrange(base + 1, rev + 1):
        l = mdiff.patchedsize(l, self.chunk(x))
    return l
    """
558 558
559 559 def reachable(self, node, stop=None):
560 560 """return the set of all nodes ancestral to a given node, including
561 561 the node itself, stopping when stop is matched"""
562 562 reachable = set((node,))
563 563 visit = [node]
564 564 if stop:
565 565 stopn = self.rev(stop)
566 566 else:
567 567 stopn = 0
568 568 while visit:
569 569 n = visit.pop(0)
570 570 if n == stop:
571 571 continue
572 572 if n == nullid:
573 573 continue
574 574 for p in self.parents(n):
575 575 if self.rev(p) < stopn:
576 576 continue
577 577 if p not in reachable:
578 578 reachable.add(p)
579 579 visit.append(p)
580 580 return reachable
581 581
582 582 def ancestors(self, *revs):
583 583 'Generate the ancestors of revs using a breadth-first visit'
584 584 visit = list(revs)
585 585 seen = set([nullrev])
586 586 while visit:
587 587 for parent in self.parentrevs(visit.pop(0)):
588 588 if parent not in seen:
589 589 visit.append(parent)
590 590 seen.add(parent)
591 591 yield parent
592 592
593 593 def descendants(self, *revs):
594 594 'Generate the descendants of revs in topological order'
595 595 seen = set(revs)
596 596 for i in xrange(min(revs) + 1, len(self)):
597 597 for x in self.parentrevs(i):
598 598 if x != nullrev and x in seen:
599 599 seen.add(i)
600 600 yield i
601 601 break
602 602
603 603 def findmissing(self, common=None, heads=None):
604 604 '''
605 605 returns the topologically sorted list of nodes from the set:
606 606 missing = (ancestors(heads) \ ancestors(common))
607 607
608 608 where ancestors() is the set of ancestors from heads, heads included
609 609
610 610 if heads is None, the heads of the revlog are used
611 611 if common is None, nullid is assumed to be a common node
612 612 '''
613 613 if common is None:
614 614 common = [nullid]
615 615 if heads is None:
616 616 heads = self.heads()
617 617
618 618 common = [self.rev(n) for n in common]
619 619 heads = [self.rev(n) for n in heads]
620 620
621 621 # we want the ancestors, but inclusive
622 622 has = set(self.ancestors(*common))
623 623 has.add(nullrev)
624 624 has.update(common)
625 625
626 626 # take all ancestors from heads that aren't in has
627 627 missing = set()
628 628 visit = [r for r in heads if r not in has]
629 629 while visit:
630 630 r = visit.pop(0)
631 631 if r in missing:
632 632 continue
633 633 else:
634 634 missing.add(r)
635 635 for p in self.parentrevs(r):
636 636 if p not in has:
637 637 visit.append(p)
638 638 missing = list(missing)
639 639 missing.sort()
640 640 return [self.node(r) for r in missing]
641 641
    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list bases and heads after all the unreachable ones have been
        pruned. Element 0 contains a topologically sorted list of all
        nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
           roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
           heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendents = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descedents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == nullrev:  # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked is descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents.  (We seeded the dependents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents.add(n)
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
790 790
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            # fast path: one linear pass clearing the "is a head" bit
            # of every revision that appears as somebody's parent
            count = len(self)
            if not count:
                return [nullid]
            # one extra slot absorbs nullrev (-1) parent references
            ishead = [1] * (count + 1)
            index = self.index
            for r in xrange(count):
                e = index[r]
                ishead[e[5]] = ishead[e[6]] = 0
            return [self.node(r) for r in xrange(count) if ishead[r]]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        # sweep forward from startrev, growing the reachable set and
        # demoting any reachable parent that turns out to have children
        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                    heads.add(r)
                if p in heads and p not in stoprevs:
                    heads.remove(p)

        return [self.node(r) for r in heads]
830 830
831 831 def children(self, node):
832 832 """find the children of a given node"""
833 833 c = []
834 834 p = self.rev(node)
835 835 for r in range(p + 1, len(self)):
836 836 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
837 837 if prevs:
838 838 for pr in prevs:
839 839 if pr == p:
840 840 c.append(self.node(r))
841 841 elif p == nullrev:
842 842 c.append(self.node(r))
843 843 return c
844 844
    def _match(self, id):
        """Resolve an exact identifier to a node, or return None.

        Accepts a revision number, a binary nodeid, str(rev) (including
        negative numbers, counted from the end), or a full 40-char hex
        nodeid.  Partial hex prefixes are handled by _partialmatch.
        """
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass
878 878
    def _partialmatch(self, id):
        """Resolve a partial hex nodeid prefix to a node.

        Returns the unique matching node, None if there is no match or
        the prefix is malformed, and raises LookupError when the prefix
        is ambiguous.
        """
        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) / 2 # grab an even number of digits
                bin_id = bin(id[:l*2])
                # coarse filter on the even-length binary prefix, then
                # refine against the full (possibly odd-length) hex id
                nl = [n for n in self.nodemap if n[:l] == bin_id]
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass
895 895
896 896 def lookup(self, id):
897 897 """locate a node based on:
898 898 - revision number or str(revision number)
899 899 - nodeid or subset of hex nodeid
900 900 """
901 901 n = self._match(id)
902 902 if n is not None:
903 903 return n
904 904 n = self._partialmatch(id)
905 905 if n:
906 906 return n
907 907
908 908 raise LookupError(id, self.indexfile, _('no match found'))
909 909
910 910 def cmp(self, node, text):
911 911 """compare text with a given file revision"""
912 912 p1, p2 = self.parents(node)
913 913 return hash(text, p1, p2) != node
914 914
915 915 def _addchunk(self, offset, data):
916 916 o, d = self._chunkcache
917 917 # try to add to existing cache
918 918 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
919 919 self._chunkcache = o, d + data
920 920 else:
921 921 self._chunkcache = offset, data
922 922
923 923 def _loadchunk(self, offset, length, df=None):
924 924 if not df:
925 925 if self._inline:
926 926 df = self.opener(self.indexfile)
927 927 else:
928 928 df = self.opener(self.datafile)
929 929
930 930 readahead = max(65536, length)
931 931 df.seek(offset)
932 932 d = df.read(readahead)
933 933 self._addchunk(offset, d)
934 934 if readahead > length:
935 935 return d[:length]
936 936 return d
937 937
938 938 def _getchunk(self, offset, length, df=None):
939 939 o, d = self._chunkcache
940 940 l = len(d)
941 941
942 942 # is it in the cache?
943 943 cachestart = offset - o
944 944 cacheend = cachestart + length
945 945 if cachestart >= 0 and cacheend <= l:
946 946 if cachestart == 0 and cacheend == l:
947 947 return d # avoid a copy
948 948 return d[cachestart:cacheend]
949 949
950 950 return self._loadchunk(offset, length, df)
951 951
952 952 def _prime(self, startrev, endrev, df):
953 953 start = self.start(startrev)
954 954 end = self.end(endrev)
955 955 if self._inline:
956 956 start += (startrev + 1) * self._io.size
957 957 end += (startrev + 1) * self._io.size
958 958 self._loadchunk(start, end - start, df)
959 959
960 960 def chunk(self, rev, df=None):
961 961 start, length = self.start(rev), self.length(rev)
962 962 if self._inline:
963 963 start += (rev + 1) * self._io.size
964 964 return decompress(self._getchunk(start, length, df))
965 965
966 966 def revdiff(self, rev1, rev2):
967 967 """return or calculate a delta between two revisions"""
968 968 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
969 969 return self.chunk(rev2)
970 970
971 971 return mdiff.textdiff(self.revision(self.node(rev1)),
972 972 self.revision(self.node(rev2)))
973 973
    def revision(self, node):
        """return an uncompressed revision of a given node

        Reconstructs the text by applying the delta chain on top of the
        chain's base full text, reusing the one-entry revision cache
        when it covers a prefix of the chain.
        """
        if node == nullid:
            return ""
        if self._cache and self._cache[0] == node:
            return str(self._cache[2])

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)

        # check rev flags
        if self.index[rev][0] & 0xFFFF:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.index[rev][0] & 0xFFFF))

        df = None

        # do we have useful data cached?
        if self._cache and self._cache[1] >= base and self._cache[1] < rev:
            # cached text sits on the chain: start the patching from it
            base = self._cache[1]
            text = str(self._cache[2])
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base + 1:
                df = self.opener(self.datafile)
                self._prime(base, rev, df)
        else:
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base:
                df = self.opener(self.datafile)
                self._prime(base, rev, df)
            text = self.chunk(base, df=df)

        bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
        text = mdiff.patches(text, bins)
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self._cache = (node, rev, text)
        return text
1017 1017
    def checkinlinesize(self, tr, fp=None):
        """Migrate an inline revlog to separate index/data files.

        Triggered once the inline data grows past 128KB.  tr is the
        current transaction; fp is an optional open handle on the index
        that must be flushed/closed before rewriting.
        """
        if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        # copy the data chunks out of the inline index into the new
        # data file, skipping the interleaved index entries
        df = self.opener(self.datafile, 'w')
        try:
            calc = self._io.size
            for r in self:
                start = self.start(r) + (r + 1) * calc
                length = self.length(r)
                d = self._getchunk(start, length)
                df.write(d)
        finally:
            df.close()

        # rewrite the index without the inline flag and without data
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        # the cached raw chunks reference inline offsets; drop them
        self._chunkcache = (0, '')
1060 1060
1061 1061 def addrevision(self, text, transaction, link, p1, p2, d=None):
1062 1062 """add a revision to the log
1063 1063
1064 1064 text - the revision data to add
1065 1065 transaction - the transaction object used for rollback
1066 1066 link - the linkrev data to add
1067 1067 p1, p2 - the parent nodeids of the revision
1068 1068 d - an optional precomputed delta
1069 1069 """
1070 1070 dfh = None
1071 1071 if not self._inline:
1072 1072 dfh = self.opener(self.datafile, "a")
1073 1073 ifh = self.opener(self.indexfile, "a+")
1074 1074 try:
1075 1075 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1076 1076 finally:
1077 1077 if dfh:
1078 1078 dfh.close()
1079 1079 ifh.close()
1080 1080
    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        """Append one revision to the already-open index/data handles.

        Returns the node; a duplicate (same text and parents) is a
        no-op.  Stores either a delta against the previous revision or
        a new full text when the delta chain would grow too long.
        """
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        curr = len(self)
        prev = curr - 1
        base = self.base(prev)
        offset = self.end(prev)

        if curr:
            if not d:
                ptext = self.revision(self.node(prev))
                d = mdiff.textdiff(ptext, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = l + offset - self.start(base)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        # (short-circuit: 'dist' is only defined when curr is non-zero)
        if not curr or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = curr

        e = (offset_type(offset, 0), l, len(text),
             base, link, self.rev(p1), self.rev(p2), node)
        # insert before the trailing magic null-revision entry
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            # data[0] is the compression header, possibly empty
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            # may convert the revlog to non-inline once it grows
            self.checkinlinesize(transaction, ifh)

        self._cache = (node, curr, text)
        return node
1130 1130
1131 1131 def ancestor(self, a, b):
1132 1132 """calculate the least common ancestor of nodes a and b"""
1133 1133
1134 1134 def parents(rev):
1135 1135 return [p for p in self.parentrevs(rev) if p != nullrev]
1136 1136
1137 1137 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1138 1138 if c is None:
1139 1139 return nullid
1140 1140
1141 1141 return self.node(c)
1142 1142
1143 1143 def group(self, nodelist, lookup, infocollect=None):
1144 1144 """calculate a delta group
1145 1145
1146 1146 Given a list of changeset revs, return a set of deltas and
1147 1147 metadata corresponding to nodes. the first delta is
1148 1148 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1149 1149 have this parent as it has all history before these
1150 1150 changesets. parent is parent[0]
1151 1151 """
1152 1152
1153 1153 # if we don't have any revisions touched by these changesets, bail
1154 1154 if not nodelist:
1155 1155 yield changegroup.closechunk()
1156 1156 return
1157 1157
1158 1158 revs = [self.rev(n) for n in nodelist]
1159 1159
1160 1160 # add the parent of the first rev
1161 1161 p = self.parentrevs(revs[0])[0]
1162 1162 revs.insert(0, p)
1163 1163
1164 1164 # build deltas
1165 for d in xrange(0, len(revs) - 1):
1165 for d in xrange(len(revs) - 1):
1166 1166 a, b = revs[d], revs[d + 1]
1167 1167 nb = self.node(b)
1168 1168
1169 1169 if infocollect is not None:
1170 1170 infocollect(nb)
1171 1171
1172 1172 p = self.parents(nb)
1173 1173 meta = nb + p[0] + p[1] + lookup(nb)
1174 1174 if a == -1:
1175 1175 d = self.revision(nb)
1176 1176 meta += mdiff.trivialdiffheader(len(d))
1177 1177 else:
1178 1178 d = self.revdiff(a, b)
1179 1179 yield changegroup.chunkheader(len(meta) + len(d))
1180 1180 yield meta
1181 1181 if len(d) > 2**20:
1182 1182 pos = 0
1183 1183 while pos < len(d):
1184 1184 pos2 = pos + 2 ** 18
1185 1185 yield d[pos:pos2]
1186 1186 pos = pos2
1187 1187 else:
1188 1188 yield d
1189 1189
1190 1190 yield changegroup.closechunk()
1191 1191
    def addgroup(self, revs, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        revs - iterable of chunks: 80 bytes of node/p1/p2/cs followed
        by the delta payload
        linkmapper - maps a changeset node to a linkrev
        transaction - transaction object notified of file growth

        Returns the node of the last revision added (None if revs was
        empty).
        """

        #track the base of the current delta log
        r = len(self)
        t = r - 1
        node = None

        # delta chain bookkeeping: base/prev revision, chain extent, and
        # the uncompressed size of the text at the end of the chain
        base = prev = nullrev
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            # inline: data lives in the index file; no data handle needed
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            for chunk in revs:
                # each chunk starts with four 20-byte nodes:
                # node, parent1, parent2, linked changeset
                node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue
                # zero-copy view of the delta payload
                delta = buffer(chunk, 80)
                del chunk

                for p in (p1, p2):
                    if not p in self.nodemap:
                        raise LookupError(p, self.indexfile, _('unknown parent'))

                if not chain:
                    # retrieve the parent revision of the delta chain
                    chain = p1
                    if not chain in self.nodemap:
                        raise LookupError(chain, self.indexfile, _('unknown base'))

                # full versions are inserted when the needed deltas become
                # comparable to the uncompressed text or when the previous
                # version is not the one we have a delta against. We use
                # the size of the previous full rev as a proxy for the
                # current size.

                if chain == prev:
                    cdelta = compress(delta)
                    cdeltalen = len(cdelta[0]) + len(cdelta[1])
                    textlen = mdiff.patchedsize(textlen, delta)

                if chain != prev or (end - start + cdeltalen) > textlen * 2:
                    # flush our writes here so we can read it in revision
                    if dfh:
                        dfh.flush()
                    ifh.flush()
                    # rebuild the full text and let _addrevision pick a
                    # fresh delta (or store a full version)
                    text = self.revision(chain)
                    if len(text) == 0:
                        # skip over trivial delta header
                        text = buffer(delta, 12)
                    else:
                        text = mdiff.patches(text, [delta])
                    del delta
                    chk = self._addrevision(text, transaction, link, p1, p2, None,
                                            ifh, dfh)
                    if not dfh and not self._inline:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    if chk != node:
                        raise RevlogError(_("consistency error adding group"))
                    textlen = len(text)
                else:
                    # fast path: append the received delta unchanged
                    e = (offset_type(end, 0), cdeltalen, textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                    self.index.insert(-1, e)
                    self.nodemap[node] = r
                    entry = self._io.packentry(e, self.node, self.version, r)
                    if self._inline:
                        ifh.write(entry)
                        ifh.write(cdelta[0])
                        ifh.write(cdelta[1])
                        # may migrate inline -> conventional; reopen handles
                        self.checkinlinesize(transaction, ifh)
                        if not self._inline:
                            dfh = self.opener(self.datafile, "a")
                            ifh = self.opener(self.indexfile, "a")
                    else:
                        dfh.write(cdelta[0])
                        dfh.write(cdelta[1])
                        ifh.write(entry)

                # advance chain state for the next chunk
                t, r, chain, prev = r, r + 1, node, node
                base = self.base(t)
                start = self.start(base)
                end = self.end(t)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node
1306 1306
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll readd them after this truncation.
        """
        if len(self) == 0:
            # nothing stored, nothing to strip
            return

        if isinstance(self.index, lazyindex):
            # force the full index into memory so entries can be deleted
            self._loadindexmap()

        # find the first revision whose linkrev (index field 4) is too new
        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            # no revision links at or past minlink: nothing to do
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            # inline: index entries and data share one file
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkcache = (0, '')
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        # keep the trailing sentinel entry, drop everything from rev on
        del self.index[rev:-1]
1350 1350
1351 1351 def checksize(self):
1352 1352 expected = 0
1353 1353 if len(self):
1354 1354 expected = max(0, self.end(len(self) - 1))
1355 1355
1356 1356 try:
1357 1357 f = self.opener(self.datafile)
1358 1358 f.seek(0, 2)
1359 1359 actual = f.tell()
1360 1360 dd = actual - expected
1361 1361 except IOError, inst:
1362 1362 if inst.errno != errno.ENOENT:
1363 1363 raise
1364 1364 dd = 0
1365 1365
1366 1366 try:
1367 1367 f = self.opener(self.indexfile)
1368 1368 f.seek(0, 2)
1369 1369 actual = f.tell()
1370 1370 s = self._io.size
1371 1371 i = max(0, actual / s)
1372 1372 di = actual - (i * s)
1373 1373 if self._inline:
1374 1374 databytes = 0
1375 1375 for r in self:
1376 1376 databytes += max(0, self.length(r))
1377 1377 dd = 0
1378 1378 di = actual - len(self) * s - databytes
1379 1379 except IOError, inst:
1380 1380 if inst.errno != errno.ENOENT:
1381 1381 raise
1382 1382 di = 0
1383 1383
1384 1384 return (dd, di)
1385 1385
1386 1386 def files(self):
1387 1387 res = [ self.indexfile ]
1388 1388 if not self._inline:
1389 1389 res.append(self.datafile)
1390 1390 return res
General Comments 0
You need to be logged in to leave comments. Login now