##// END OF EJS Templates
replace xrange(0, n) with xrange(n)
Martin Geisler -
r8624:2b3dec0e default
parent child Browse files
Show More
@@ -1,270 +1,270 b''
1 # color.py color output for the status and qseries commands
1 # color.py color output for the status and qseries commands
2 #
2 #
3 # Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com>
3 # Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com>
4 #
4 #
5 # This program is free software; you can redistribute it and/or modify it
5 # This program is free software; you can redistribute it and/or modify it
6 # under the terms of the GNU General Public License as published by the
6 # under the terms of the GNU General Public License as published by the
7 # Free Software Foundation; either version 2 of the License, or (at your
7 # Free Software Foundation; either version 2 of the License, or (at your
8 # option) any later version.
8 # option) any later version.
9 #
9 #
10 # This program is distributed in the hope that it will be useful, but
10 # This program is distributed in the hope that it will be useful, but
11 # WITHOUT ANY WARRANTY; without even the implied warranty of
11 # WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
13 # Public License for more details.
13 # Public License for more details.
14 #
14 #
15 # You should have received a copy of the GNU General Public License along
15 # You should have received a copy of the GNU General Public License along
16 # with this program; if not, write to the Free Software Foundation, Inc.,
16 # with this program; if not, write to the Free Software Foundation, Inc.,
17 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
18
19 '''add color output to status, qseries, and diff-related commands
19 '''add color output to status, qseries, and diff-related commands
20
20
21 This extension modifies the status command to add color to its output
21 This extension modifies the status command to add color to its output
22 to reflect file status, the qseries command to add color to reflect
22 to reflect file status, the qseries command to add color to reflect
23 patch status (applied, unapplied, missing), and to diff-related
23 patch status (applied, unapplied, missing), and to diff-related
24 commands to highlight additions, removals, diff headers, and trailing
24 commands to highlight additions, removals, diff headers, and trailing
25 whitespace.
25 whitespace.
26
26
27 Other effects in addition to color, like bold and underlined text, are
27 Other effects in addition to color, like bold and underlined text, are
28 also available. Effects are rendered with the ECMA-48 SGR control
28 also available. Effects are rendered with the ECMA-48 SGR control
29 function (aka ANSI escape codes). This module also provides the
29 function (aka ANSI escape codes). This module also provides the
30 render_text function, which can be used to add effects to any text.
30 render_text function, which can be used to add effects to any text.
31
31
32 To enable this extension, add this to your .hgrc file:
32 To enable this extension, add this to your .hgrc file:
33 [extensions]
33 [extensions]
34 color =
34 color =
35
35
36 Default effects may be overridden from the .hgrc file:
36 Default effects may be overridden from the .hgrc file:
37
37
38 [color]
38 [color]
39 status.modified = blue bold underline red_background
39 status.modified = blue bold underline red_background
40 status.added = green bold
40 status.added = green bold
41 status.removed = red bold blue_background
41 status.removed = red bold blue_background
42 status.deleted = cyan bold underline
42 status.deleted = cyan bold underline
43 status.unknown = magenta bold underline
43 status.unknown = magenta bold underline
44 status.ignored = black bold
44 status.ignored = black bold
45
45
46 # 'none' turns off all effects
46 # 'none' turns off all effects
47 status.clean = none
47 status.clean = none
48 status.copied = none
48 status.copied = none
49
49
50 qseries.applied = blue bold underline
50 qseries.applied = blue bold underline
51 qseries.unapplied = black bold
51 qseries.unapplied = black bold
52 qseries.missing = red bold
52 qseries.missing = red bold
53
53
54 diff.diffline = bold
54 diff.diffline = bold
55 diff.extended = cyan bold
55 diff.extended = cyan bold
56 diff.file_a = red bold
56 diff.file_a = red bold
57 diff.file_b = green bold
57 diff.file_b = green bold
58 diff.hunk = magenta
58 diff.hunk = magenta
59 diff.deleted = red
59 diff.deleted = red
60 diff.inserted = green
60 diff.inserted = green
61 diff.changed = white
61 diff.changed = white
62 diff.trailingwhitespace = bold red_background
62 diff.trailingwhitespace = bold red_background
63 '''
63 '''
64
64
65 import os, sys
65 import os, sys
66
66
67 from mercurial import cmdutil, commands, extensions
67 from mercurial import cmdutil, commands, extensions
68 from mercurial.i18n import _
68 from mercurial.i18n import _
69
69
70 # start and stop parameters for effects
70 # start and stop parameters for effects
71 _effect_params = {'none': 0,
71 _effect_params = {'none': 0,
72 'black': 30,
72 'black': 30,
73 'red': 31,
73 'red': 31,
74 'green': 32,
74 'green': 32,
75 'yellow': 33,
75 'yellow': 33,
76 'blue': 34,
76 'blue': 34,
77 'magenta': 35,
77 'magenta': 35,
78 'cyan': 36,
78 'cyan': 36,
79 'white': 37,
79 'white': 37,
80 'bold': 1,
80 'bold': 1,
81 'italic': 3,
81 'italic': 3,
82 'underline': 4,
82 'underline': 4,
83 'inverse': 7,
83 'inverse': 7,
84 'black_background': 40,
84 'black_background': 40,
85 'red_background': 41,
85 'red_background': 41,
86 'green_background': 42,
86 'green_background': 42,
87 'yellow_background': 43,
87 'yellow_background': 43,
88 'blue_background': 44,
88 'blue_background': 44,
89 'purple_background': 45,
89 'purple_background': 45,
90 'cyan_background': 46,
90 'cyan_background': 46,
91 'white_background': 47}
91 'white_background': 47}
92
92
93 def render_effects(text, effects):
93 def render_effects(text, effects):
94 'Wrap text in commands to turn on each effect.'
94 'Wrap text in commands to turn on each effect.'
95 start = [str(_effect_params[e]) for e in ['none'] + effects]
95 start = [str(_effect_params[e]) for e in ['none'] + effects]
96 start = '\033[' + ';'.join(start) + 'm'
96 start = '\033[' + ';'.join(start) + 'm'
97 stop = '\033[' + str(_effect_params['none']) + 'm'
97 stop = '\033[' + str(_effect_params['none']) + 'm'
98 return ''.join([start, text, stop])
98 return ''.join([start, text, stop])
99
99
100 def colorstatus(orig, ui, repo, *pats, **opts):
100 def colorstatus(orig, ui, repo, *pats, **opts):
101 '''run the status command with colored output'''
101 '''run the status command with colored output'''
102
102
103 delimiter = opts['print0'] and '\0' or '\n'
103 delimiter = opts['print0'] and '\0' or '\n'
104
104
105 nostatus = opts.get('no_status')
105 nostatus = opts.get('no_status')
106 opts['no_status'] = False
106 opts['no_status'] = False
107 # run status and capture its output
107 # run status and capture its output
108 ui.pushbuffer()
108 ui.pushbuffer()
109 retval = orig(ui, repo, *pats, **opts)
109 retval = orig(ui, repo, *pats, **opts)
110 # filter out empty strings
110 # filter out empty strings
111 lines_with_status = [ line for line in ui.popbuffer().split(delimiter) if line ]
111 lines_with_status = [ line for line in ui.popbuffer().split(delimiter) if line ]
112
112
113 if nostatus:
113 if nostatus:
114 lines = [l[2:] for l in lines_with_status]
114 lines = [l[2:] for l in lines_with_status]
115 else:
115 else:
116 lines = lines_with_status
116 lines = lines_with_status
117
117
118 # apply color to output and display it
118 # apply color to output and display it
119 for i in xrange(0, len(lines)):
119 for i in xrange(len(lines)):
120 status = _status_abbreviations[lines_with_status[i][0]]
120 status = _status_abbreviations[lines_with_status[i][0]]
121 effects = _status_effects[status]
121 effects = _status_effects[status]
122 if effects:
122 if effects:
123 lines[i] = render_effects(lines[i], effects)
123 lines[i] = render_effects(lines[i], effects)
124 ui.write(lines[i] + delimiter)
124 ui.write(lines[i] + delimiter)
125 return retval
125 return retval
126
126
127 _status_abbreviations = { 'M': 'modified',
127 _status_abbreviations = { 'M': 'modified',
128 'A': 'added',
128 'A': 'added',
129 'R': 'removed',
129 'R': 'removed',
130 '!': 'deleted',
130 '!': 'deleted',
131 '?': 'unknown',
131 '?': 'unknown',
132 'I': 'ignored',
132 'I': 'ignored',
133 'C': 'clean',
133 'C': 'clean',
134 ' ': 'copied', }
134 ' ': 'copied', }
135
135
136 _status_effects = { 'modified': ['blue', 'bold'],
136 _status_effects = { 'modified': ['blue', 'bold'],
137 'added': ['green', 'bold'],
137 'added': ['green', 'bold'],
138 'removed': ['red', 'bold'],
138 'removed': ['red', 'bold'],
139 'deleted': ['cyan', 'bold', 'underline'],
139 'deleted': ['cyan', 'bold', 'underline'],
140 'unknown': ['magenta', 'bold', 'underline'],
140 'unknown': ['magenta', 'bold', 'underline'],
141 'ignored': ['black', 'bold'],
141 'ignored': ['black', 'bold'],
142 'clean': ['none'],
142 'clean': ['none'],
143 'copied': ['none'], }
143 'copied': ['none'], }
144
144
145 def colorqseries(orig, ui, repo, *dummy, **opts):
145 def colorqseries(orig, ui, repo, *dummy, **opts):
146 '''run the qseries command with colored output'''
146 '''run the qseries command with colored output'''
147 ui.pushbuffer()
147 ui.pushbuffer()
148 retval = orig(ui, repo, **opts)
148 retval = orig(ui, repo, **opts)
149 patches = ui.popbuffer().splitlines()
149 patches = ui.popbuffer().splitlines()
150 for patch in patches:
150 for patch in patches:
151 patchname = patch
151 patchname = patch
152 if opts['summary']:
152 if opts['summary']:
153 patchname = patchname.split(': ')[0]
153 patchname = patchname.split(': ')[0]
154 if ui.verbose:
154 if ui.verbose:
155 patchname = patchname.split(' ', 2)[-1]
155 patchname = patchname.split(' ', 2)[-1]
156
156
157 if opts['missing']:
157 if opts['missing']:
158 effects = _patch_effects['missing']
158 effects = _patch_effects['missing']
159 # Determine if patch is applied.
159 # Determine if patch is applied.
160 elif [ applied for applied in repo.mq.applied
160 elif [ applied for applied in repo.mq.applied
161 if patchname == applied.name ]:
161 if patchname == applied.name ]:
162 effects = _patch_effects['applied']
162 effects = _patch_effects['applied']
163 else:
163 else:
164 effects = _patch_effects['unapplied']
164 effects = _patch_effects['unapplied']
165 ui.write(render_effects(patch, effects) + '\n')
165 ui.write(render_effects(patch, effects) + '\n')
166 return retval
166 return retval
167
167
168 _patch_effects = { 'applied': ['blue', 'bold', 'underline'],
168 _patch_effects = { 'applied': ['blue', 'bold', 'underline'],
169 'missing': ['red', 'bold'],
169 'missing': ['red', 'bold'],
170 'unapplied': ['black', 'bold'], }
170 'unapplied': ['black', 'bold'], }
171
171
172 def colorwrap(orig, s):
172 def colorwrap(orig, s):
173 '''wrap ui.write for colored diff output'''
173 '''wrap ui.write for colored diff output'''
174 lines = s.split('\n')
174 lines = s.split('\n')
175 for i, line in enumerate(lines):
175 for i, line in enumerate(lines):
176 stripline = line
176 stripline = line
177 if line and line[0] in '+-':
177 if line and line[0] in '+-':
178 # highlight trailing whitespace, but only in changed lines
178 # highlight trailing whitespace, but only in changed lines
179 stripline = line.rstrip()
179 stripline = line.rstrip()
180 for prefix, style in _diff_prefixes:
180 for prefix, style in _diff_prefixes:
181 if stripline.startswith(prefix):
181 if stripline.startswith(prefix):
182 lines[i] = render_effects(stripline, _diff_effects[style])
182 lines[i] = render_effects(stripline, _diff_effects[style])
183 break
183 break
184 if line != stripline:
184 if line != stripline:
185 lines[i] += render_effects(
185 lines[i] += render_effects(
186 line[len(stripline):], _diff_effects['trailingwhitespace'])
186 line[len(stripline):], _diff_effects['trailingwhitespace'])
187 orig('\n'.join(lines))
187 orig('\n'.join(lines))
188
188
189 def colorshowpatch(orig, self, node):
189 def colorshowpatch(orig, self, node):
190 '''wrap cmdutil.changeset_printer.showpatch with colored output'''
190 '''wrap cmdutil.changeset_printer.showpatch with colored output'''
191 oldwrite = extensions.wrapfunction(self.ui, 'write', colorwrap)
191 oldwrite = extensions.wrapfunction(self.ui, 'write', colorwrap)
192 try:
192 try:
193 orig(self, node)
193 orig(self, node)
194 finally:
194 finally:
195 self.ui.write = oldwrite
195 self.ui.write = oldwrite
196
196
197 def colordiff(orig, ui, repo, *pats, **opts):
197 def colordiff(orig, ui, repo, *pats, **opts):
198 '''run the diff command with colored output'''
198 '''run the diff command with colored output'''
199 oldwrite = extensions.wrapfunction(ui, 'write', colorwrap)
199 oldwrite = extensions.wrapfunction(ui, 'write', colorwrap)
200 try:
200 try:
201 orig(ui, repo, *pats, **opts)
201 orig(ui, repo, *pats, **opts)
202 finally:
202 finally:
203 ui.write = oldwrite
203 ui.write = oldwrite
204
204
205 _diff_prefixes = [('diff', 'diffline'),
205 _diff_prefixes = [('diff', 'diffline'),
206 ('copy', 'extended'),
206 ('copy', 'extended'),
207 ('rename', 'extended'),
207 ('rename', 'extended'),
208 ('old', 'extended'),
208 ('old', 'extended'),
209 ('new', 'extended'),
209 ('new', 'extended'),
210 ('deleted', 'extended'),
210 ('deleted', 'extended'),
211 ('---', 'file_a'),
211 ('---', 'file_a'),
212 ('+++', 'file_b'),
212 ('+++', 'file_b'),
213 ('@', 'hunk'),
213 ('@', 'hunk'),
214 ('-', 'deleted'),
214 ('-', 'deleted'),
215 ('+', 'inserted')]
215 ('+', 'inserted')]
216
216
217 _diff_effects = {'diffline': ['bold',],
217 _diff_effects = {'diffline': ['bold',],
218 'extended': ['cyan', 'bold'],
218 'extended': ['cyan', 'bold'],
219 'file_a': ['red', 'bold'],
219 'file_a': ['red', 'bold'],
220 'file_b': ['green', 'bold'],
220 'file_b': ['green', 'bold'],
221 'hunk': ['magenta',],
221 'hunk': ['magenta',],
222 'deleted': ['red',],
222 'deleted': ['red',],
223 'inserted': ['green',],
223 'inserted': ['green',],
224 'changed': ['white',],
224 'changed': ['white',],
225 'trailingwhitespace': ['bold', 'red_background'],}
225 'trailingwhitespace': ['bold', 'red_background'],}
226
226
227 def uisetup(ui):
227 def uisetup(ui):
228 '''Initialize the extension.'''
228 '''Initialize the extension.'''
229 _setupcmd(ui, 'diff', commands.table, colordiff, _diff_effects)
229 _setupcmd(ui, 'diff', commands.table, colordiff, _diff_effects)
230 _setupcmd(ui, 'incoming', commands.table, None, _diff_effects)
230 _setupcmd(ui, 'incoming', commands.table, None, _diff_effects)
231 _setupcmd(ui, 'log', commands.table, None, _diff_effects)
231 _setupcmd(ui, 'log', commands.table, None, _diff_effects)
232 _setupcmd(ui, 'outgoing', commands.table, None, _diff_effects)
232 _setupcmd(ui, 'outgoing', commands.table, None, _diff_effects)
233 _setupcmd(ui, 'tip', commands.table, None, _diff_effects)
233 _setupcmd(ui, 'tip', commands.table, None, _diff_effects)
234 _setupcmd(ui, 'status', commands.table, colorstatus, _status_effects)
234 _setupcmd(ui, 'status', commands.table, colorstatus, _status_effects)
235 try:
235 try:
236 mq = extensions.find('mq')
236 mq = extensions.find('mq')
237 _setupcmd(ui, 'qdiff', mq.cmdtable, colordiff, _diff_effects)
237 _setupcmd(ui, 'qdiff', mq.cmdtable, colordiff, _diff_effects)
238 _setupcmd(ui, 'qseries', mq.cmdtable, colorqseries, _patch_effects)
238 _setupcmd(ui, 'qseries', mq.cmdtable, colorqseries, _patch_effects)
239 except KeyError:
239 except KeyError:
240 # The mq extension is not enabled
240 # The mq extension is not enabled
241 pass
241 pass
242
242
243 def _setupcmd(ui, cmd, table, func, effectsmap):
243 def _setupcmd(ui, cmd, table, func, effectsmap):
244 '''patch in command to command table and load effect map'''
244 '''patch in command to command table and load effect map'''
245 def nocolor(orig, *args, **opts):
245 def nocolor(orig, *args, **opts):
246
246
247 if (opts['no_color'] or opts['color'] == 'never' or
247 if (opts['no_color'] or opts['color'] == 'never' or
248 (opts['color'] == 'auto' and (os.environ.get('TERM') == 'dumb'
248 (opts['color'] == 'auto' and (os.environ.get('TERM') == 'dumb'
249 or not sys.__stdout__.isatty()))):
249 or not sys.__stdout__.isatty()))):
250 return orig(*args, **opts)
250 return orig(*args, **opts)
251
251
252 oldshowpatch = extensions.wrapfunction(cmdutil.changeset_printer,
252 oldshowpatch = extensions.wrapfunction(cmdutil.changeset_printer,
253 'showpatch', colorshowpatch)
253 'showpatch', colorshowpatch)
254 try:
254 try:
255 if func is not None:
255 if func is not None:
256 return func(orig, *args, **opts)
256 return func(orig, *args, **opts)
257 return orig(*args, **opts)
257 return orig(*args, **opts)
258 finally:
258 finally:
259 cmdutil.changeset_printer.showpatch = oldshowpatch
259 cmdutil.changeset_printer.showpatch = oldshowpatch
260
260
261 entry = extensions.wrapcommand(table, cmd, nocolor)
261 entry = extensions.wrapcommand(table, cmd, nocolor)
262 entry[1].extend([
262 entry[1].extend([
263 ('', 'color', 'auto', _("when to colorize (always, auto, or never)")),
263 ('', 'color', 'auto', _("when to colorize (always, auto, or never)")),
264 ('', 'no-color', None, _("don't colorize output")),
264 ('', 'no-color', None, _("don't colorize output")),
265 ])
265 ])
266
266
267 for status in effectsmap:
267 for status in effectsmap:
268 effects = ui.configlist('color', cmd + '.' + status)
268 effects = ui.configlist('color', cmd + '.' + status)
269 if effects:
269 if effects:
270 effectsmap[status] = effects
270 effectsmap[status] = effects
@@ -1,359 +1,359 b''
1 # Minimal support for git commands on an hg repository
1 # Minimal support for git commands on an hg repository
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 '''browsing the repository in a graphical way
8 '''browsing the repository in a graphical way
9
9
10 The hgk extension allows browsing the history of a repository in a
10 The hgk extension allows browsing the history of a repository in a
11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
12 distributed with Mercurial.)
12 distributed with Mercurial.)
13
13
14 hgk consists of two parts: a Tcl script that does the displaying and
14 hgk consists of two parts: a Tcl script that does the displaying and
15 querying of information, and an extension to mercurial named hgk.py,
15 querying of information, and an extension to mercurial named hgk.py,
16 which provides hooks for hgk to get information. hgk can be found in
16 which provides hooks for hgk to get information. hgk can be found in
17 the contrib directory, and hgk.py can be found in the hgext directory.
17 the contrib directory, and hgk.py can be found in the hgext directory.
18
18
19 To load the hgext.py extension, add it to your .hgrc file (you have to
19 To load the hgext.py extension, add it to your .hgrc file (you have to
20 use your global $HOME/.hgrc file, not one in a repository). You can
20 use your global $HOME/.hgrc file, not one in a repository). You can
21 specify an absolute path:
21 specify an absolute path:
22
22
23 [extensions]
23 [extensions]
24 hgk=/usr/local/lib/hgk.py
24 hgk=/usr/local/lib/hgk.py
25
25
26 Mercurial can also scan the default python library path for a file
26 Mercurial can also scan the default python library path for a file
27 named 'hgk.py' if you set hgk empty:
27 named 'hgk.py' if you set hgk empty:
28
28
29 [extensions]
29 [extensions]
30 hgk=
30 hgk=
31
31
32 The hg view command will launch the hgk Tcl script. For this command
32 The hg view command will launch the hgk Tcl script. For this command
33 to work, hgk must be in your search path. Alternately, you can specify
33 to work, hgk must be in your search path. Alternately, you can specify
34 the path to hgk in your .hgrc file:
34 the path to hgk in your .hgrc file:
35
35
36 [hgk]
36 [hgk]
37 path=/location/of/hgk
37 path=/location/of/hgk
38
38
39 hgk can make use of the extdiff extension to visualize revisions.
39 hgk can make use of the extdiff extension to visualize revisions.
40 Assuming you had already configured extdiff vdiff command, just add:
40 Assuming you had already configured extdiff vdiff command, just add:
41
41
42 [hgk]
42 [hgk]
43 vdiff=vdiff
43 vdiff=vdiff
44
44
45 Revisions context menu will now display additional entries to fire
45 Revisions context menu will now display additional entries to fire
46 vdiff on hovered and selected revisions.'''
46 vdiff on hovered and selected revisions.'''
47
47
48 import os
48 import os
49 from mercurial import commands, util, patch, revlog, cmdutil
49 from mercurial import commands, util, patch, revlog, cmdutil
50 from mercurial.node import nullid, nullrev, short
50 from mercurial.node import nullid, nullrev, short
51 from mercurial.i18n import _
51 from mercurial.i18n import _
52
52
53 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
53 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
54 """diff trees from two commits"""
54 """diff trees from two commits"""
55 def __difftree(repo, node1, node2, files=[]):
55 def __difftree(repo, node1, node2, files=[]):
56 assert node2 is not None
56 assert node2 is not None
57 mmap = repo[node1].manifest()
57 mmap = repo[node1].manifest()
58 mmap2 = repo[node2].manifest()
58 mmap2 = repo[node2].manifest()
59 m = cmdutil.match(repo, files)
59 m = cmdutil.match(repo, files)
60 modified, added, removed = repo.status(node1, node2, m)[:3]
60 modified, added, removed = repo.status(node1, node2, m)[:3]
61 empty = short(nullid)
61 empty = short(nullid)
62
62
63 for f in modified:
63 for f in modified:
64 # TODO get file permissions
64 # TODO get file permissions
65 ui.write(":100664 100664 %s %s M\t%s\t%s\n" %
65 ui.write(":100664 100664 %s %s M\t%s\t%s\n" %
66 (short(mmap[f]), short(mmap2[f]), f, f))
66 (short(mmap[f]), short(mmap2[f]), f, f))
67 for f in added:
67 for f in added:
68 ui.write(":000000 100664 %s %s N\t%s\t%s\n" %
68 ui.write(":000000 100664 %s %s N\t%s\t%s\n" %
69 (empty, short(mmap2[f]), f, f))
69 (empty, short(mmap2[f]), f, f))
70 for f in removed:
70 for f in removed:
71 ui.write(":100664 000000 %s %s D\t%s\t%s\n" %
71 ui.write(":100664 000000 %s %s D\t%s\t%s\n" %
72 (short(mmap[f]), empty, f, f))
72 (short(mmap[f]), empty, f, f))
73 ##
73 ##
74
74
75 while True:
75 while True:
76 if opts['stdin']:
76 if opts['stdin']:
77 try:
77 try:
78 line = raw_input().split(' ')
78 line = raw_input().split(' ')
79 node1 = line[0]
79 node1 = line[0]
80 if len(line) > 1:
80 if len(line) > 1:
81 node2 = line[1]
81 node2 = line[1]
82 else:
82 else:
83 node2 = None
83 node2 = None
84 except EOFError:
84 except EOFError:
85 break
85 break
86 node1 = repo.lookup(node1)
86 node1 = repo.lookup(node1)
87 if node2:
87 if node2:
88 node2 = repo.lookup(node2)
88 node2 = repo.lookup(node2)
89 else:
89 else:
90 node2 = node1
90 node2 = node1
91 node1 = repo.changelog.parents(node1)[0]
91 node1 = repo.changelog.parents(node1)[0]
92 if opts['patch']:
92 if opts['patch']:
93 if opts['pretty']:
93 if opts['pretty']:
94 catcommit(ui, repo, node2, "")
94 catcommit(ui, repo, node2, "")
95 m = cmdutil.match(repo, files)
95 m = cmdutil.match(repo, files)
96 chunks = patch.diff(repo, node1, node2, match=m,
96 chunks = patch.diff(repo, node1, node2, match=m,
97 opts=patch.diffopts(ui, {'git': True}))
97 opts=patch.diffopts(ui, {'git': True}))
98 for chunk in chunks:
98 for chunk in chunks:
99 ui.write(chunk)
99 ui.write(chunk)
100 else:
100 else:
101 __difftree(repo, node1, node2, files=files)
101 __difftree(repo, node1, node2, files=files)
102 if not opts['stdin']:
102 if not opts['stdin']:
103 break
103 break
104
104
105 def catcommit(ui, repo, n, prefix, ctx=None):
105 def catcommit(ui, repo, n, prefix, ctx=None):
106 nlprefix = '\n' + prefix;
106 nlprefix = '\n' + prefix;
107 if ctx is None:
107 if ctx is None:
108 ctx = repo[n]
108 ctx = repo[n]
109 ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ??
109 ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ??
110 for p in ctx.parents():
110 for p in ctx.parents():
111 ui.write("parent %s\n" % p)
111 ui.write("parent %s\n" % p)
112
112
113 date = ctx.date()
113 date = ctx.date()
114 description = ctx.description().replace("\0", "")
114 description = ctx.description().replace("\0", "")
115 lines = description.splitlines()
115 lines = description.splitlines()
116 if lines and lines[-1].startswith('committer:'):
116 if lines and lines[-1].startswith('committer:'):
117 committer = lines[-1].split(': ')[1].rstrip()
117 committer = lines[-1].split(': ')[1].rstrip()
118 else:
118 else:
119 committer = ctx.user()
119 committer = ctx.user()
120
120
121 ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
121 ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
122 ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
122 ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
123 ui.write("revision %d\n" % ctx.rev())
123 ui.write("revision %d\n" % ctx.rev())
124 ui.write("branch %s\n\n" % ctx.branch())
124 ui.write("branch %s\n\n" % ctx.branch())
125
125
126 if prefix != "":
126 if prefix != "":
127 ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip()))
127 ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip()))
128 else:
128 else:
129 ui.write(description + "\n")
129 ui.write(description + "\n")
130 if prefix:
130 if prefix:
131 ui.write('\0')
131 ui.write('\0')
132
132
133 def base(ui, repo, node1, node2):
133 def base(ui, repo, node1, node2):
134 """output common ancestor information"""
134 """output common ancestor information"""
135 node1 = repo.lookup(node1)
135 node1 = repo.lookup(node1)
136 node2 = repo.lookup(node2)
136 node2 = repo.lookup(node2)
137 n = repo.changelog.ancestor(node1, node2)
137 n = repo.changelog.ancestor(node1, node2)
138 ui.write(short(n) + "\n")
138 ui.write(short(n) + "\n")
139
139
140 def catfile(ui, repo, type=None, r=None, **opts):
140 def catfile(ui, repo, type=None, r=None, **opts):
141 """cat a specific revision"""
141 """cat a specific revision"""
142 # in stdin mode, every line except the commit is prefixed with two
142 # in stdin mode, every line except the commit is prefixed with two
143 # spaces. This way the caller can find the commit without magic
143 # spaces. This way the caller can find the commit without magic
144 # strings
144 # strings
145 #
145 #
146 prefix = ""
146 prefix = ""
147 if opts['stdin']:
147 if opts['stdin']:
148 try:
148 try:
149 (type, r) = raw_input().split(' ');
149 (type, r) = raw_input().split(' ');
150 prefix = " "
150 prefix = " "
151 except EOFError:
151 except EOFError:
152 return
152 return
153
153
154 else:
154 else:
155 if not type or not r:
155 if not type or not r:
156 ui.warn(_("cat-file: type or revision not supplied\n"))
156 ui.warn(_("cat-file: type or revision not supplied\n"))
157 commands.help_(ui, 'cat-file')
157 commands.help_(ui, 'cat-file')
158
158
159 while r:
159 while r:
160 if type != "commit":
160 if type != "commit":
161 ui.warn(_("aborting hg cat-file only understands commits\n"))
161 ui.warn(_("aborting hg cat-file only understands commits\n"))
162 return 1;
162 return 1;
163 n = repo.lookup(r)
163 n = repo.lookup(r)
164 catcommit(ui, repo, n, prefix)
164 catcommit(ui, repo, n, prefix)
165 if opts['stdin']:
165 if opts['stdin']:
166 try:
166 try:
167 (type, r) = raw_input().split(' ');
167 (type, r) = raw_input().split(' ');
168 except EOFError:
168 except EOFError:
169 break
169 break
170 else:
170 else:
171 break
171 break
172
172
173 # git rev-tree is a confusing thing. You can supply a number of
173 # git rev-tree is a confusing thing. You can supply a number of
174 # commit sha1s on the command line, and it walks the commit history
174 # commit sha1s on the command line, and it walks the commit history
175 # telling you which commits are reachable from the supplied ones via
175 # telling you which commits are reachable from the supplied ones via
176 # a bitmask based on arg position.
176 # a bitmask based on arg position.
177 # you can specify a commit to stop at by starting the sha1 with ^
177 # you can specify a commit to stop at by starting the sha1 with ^
178 def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
178 def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
179 def chlogwalk():
179 def chlogwalk():
180 count = len(repo)
180 count = len(repo)
181 i = count
181 i = count
182 l = [0] * 100
182 l = [0] * 100
183 chunk = 100
183 chunk = 100
184 while True:
184 while True:
185 if chunk > i:
185 if chunk > i:
186 chunk = i
186 chunk = i
187 i = 0
187 i = 0
188 else:
188 else:
189 i -= chunk
189 i -= chunk
190
190
191 for x in xrange(0, chunk):
191 for x in xrange(chunk):
192 if i + x >= count:
192 if i + x >= count:
193 l[chunk - x:] = [0] * (chunk - x)
193 l[chunk - x:] = [0] * (chunk - x)
194 break
194 break
195 if full != None:
195 if full != None:
196 l[x] = repo[i + x]
196 l[x] = repo[i + x]
197 l[x].changeset() # force reading
197 l[x].changeset() # force reading
198 else:
198 else:
199 l[x] = 1
199 l[x] = 1
200 for x in xrange(chunk-1, -1, -1):
200 for x in xrange(chunk-1, -1, -1):
201 if l[x] != 0:
201 if l[x] != 0:
202 yield (i + x, full != None and l[x] or None)
202 yield (i + x, full != None and l[x] or None)
203 if i == 0:
203 if i == 0:
204 break
204 break
205
205
206 # calculate and return the reachability bitmask for sha
206 # calculate and return the reachability bitmask for sha
207 def is_reachable(ar, reachable, sha):
207 def is_reachable(ar, reachable, sha):
208 if len(ar) == 0:
208 if len(ar) == 0:
209 return 1
209 return 1
210 mask = 0
210 mask = 0
211 for i in xrange(len(ar)):
211 for i in xrange(len(ar)):
212 if sha in reachable[i]:
212 if sha in reachable[i]:
213 mask |= 1 << i
213 mask |= 1 << i
214
214
215 return mask
215 return mask
216
216
217 reachable = []
217 reachable = []
218 stop_sha1 = []
218 stop_sha1 = []
219 want_sha1 = []
219 want_sha1 = []
220 count = 0
220 count = 0
221
221
222 # figure out which commits they are asking for and which ones they
222 # figure out which commits they are asking for and which ones they
223 # want us to stop on
223 # want us to stop on
224 for i in xrange(len(args)):
224 for i in xrange(len(args)):
225 if args[i].startswith('^'):
225 if args[i].startswith('^'):
226 s = repo.lookup(args[i][1:])
226 s = repo.lookup(args[i][1:])
227 stop_sha1.append(s)
227 stop_sha1.append(s)
228 want_sha1.append(s)
228 want_sha1.append(s)
229 elif args[i] != 'HEAD':
229 elif args[i] != 'HEAD':
230 want_sha1.append(repo.lookup(args[i]))
230 want_sha1.append(repo.lookup(args[i]))
231
231
232 # calculate the graph for the supplied commits
232 # calculate the graph for the supplied commits
233 for i in xrange(len(want_sha1)):
233 for i in xrange(len(want_sha1)):
234 reachable.append(set());
234 reachable.append(set());
235 n = want_sha1[i];
235 n = want_sha1[i];
236 visit = [n];
236 visit = [n];
237 reachable[i].add(n)
237 reachable[i].add(n)
238 while visit:
238 while visit:
239 n = visit.pop(0)
239 n = visit.pop(0)
240 if n in stop_sha1:
240 if n in stop_sha1:
241 continue
241 continue
242 for p in repo.changelog.parents(n):
242 for p in repo.changelog.parents(n):
243 if p not in reachable[i]:
243 if p not in reachable[i]:
244 reachable[i].add(p)
244 reachable[i].add(p)
245 visit.append(p)
245 visit.append(p)
246 if p in stop_sha1:
246 if p in stop_sha1:
247 continue
247 continue
248
248
249 # walk the repository looking for commits that are in our
249 # walk the repository looking for commits that are in our
250 # reachability graph
250 # reachability graph
251 for i, ctx in chlogwalk():
251 for i, ctx in chlogwalk():
252 n = repo.changelog.node(i)
252 n = repo.changelog.node(i)
253 mask = is_reachable(want_sha1, reachable, n)
253 mask = is_reachable(want_sha1, reachable, n)
254 if mask:
254 if mask:
255 parentstr = ""
255 parentstr = ""
256 if parents:
256 if parents:
257 pp = repo.changelog.parents(n)
257 pp = repo.changelog.parents(n)
258 if pp[0] != nullid:
258 if pp[0] != nullid:
259 parentstr += " " + short(pp[0])
259 parentstr += " " + short(pp[0])
260 if pp[1] != nullid:
260 if pp[1] != nullid:
261 parentstr += " " + short(pp[1])
261 parentstr += " " + short(pp[1])
262 if not full:
262 if not full:
263 ui.write("%s%s\n" % (short(n), parentstr))
263 ui.write("%s%s\n" % (short(n), parentstr))
264 elif full == "commit":
264 elif full == "commit":
265 ui.write("%s%s\n" % (short(n), parentstr))
265 ui.write("%s%s\n" % (short(n), parentstr))
266 catcommit(ui, repo, n, ' ', ctx)
266 catcommit(ui, repo, n, ' ', ctx)
267 else:
267 else:
268 (p1, p2) = repo.changelog.parents(n)
268 (p1, p2) = repo.changelog.parents(n)
269 (h, h1, h2) = map(short, (n, p1, p2))
269 (h, h1, h2) = map(short, (n, p1, p2))
270 (i1, i2) = map(repo.changelog.rev, (p1, p2))
270 (i1, i2) = map(repo.changelog.rev, (p1, p2))
271
271
272 date = ctx.date()[0]
272 date = ctx.date()[0]
273 ui.write("%s %s:%s" % (date, h, mask))
273 ui.write("%s %s:%s" % (date, h, mask))
274 mask = is_reachable(want_sha1, reachable, p1)
274 mask = is_reachable(want_sha1, reachable, p1)
275 if i1 != nullrev and mask > 0:
275 if i1 != nullrev and mask > 0:
276 ui.write("%s:%s " % (h1, mask)),
276 ui.write("%s:%s " % (h1, mask)),
277 mask = is_reachable(want_sha1, reachable, p2)
277 mask = is_reachable(want_sha1, reachable, p2)
278 if i2 != nullrev and mask > 0:
278 if i2 != nullrev and mask > 0:
279 ui.write("%s:%s " % (h2, mask))
279 ui.write("%s:%s " % (h2, mask))
280 ui.write("\n")
280 ui.write("\n")
281 if maxnr and count >= maxnr:
281 if maxnr and count >= maxnr:
282 break
282 break
283 count += 1
283 count += 1
284
284
def revparse(ui, repo, *revs, **opts):
    """parse given revisions"""
    def revstr(rev):
        if rev == 'HEAD':  # git-style name for the current tip
            rev = 'tip'
        return revlog.hex(repo.lookup(rev))

    for spec in revs:
        # a spec may be "rev" or "rev1:rev2"; the second half of a
        # range is printed git-style with a leading '^'
        parts = spec.split(':', 1)
        ui.write('%s\n' % revstr(parts[0]))
        if len(parts) == 2:
            ui.write('^%s\n' % revstr(parts[1]))
297
297
298 # git rev-list tries to order things by date, and has the ability to stop
298 # git rev-list tries to order things by date, and has the ability to stop
299 # at a given commit without walking the whole repo. TODO add the stop
299 # at a given commit without walking the whole repo. TODO add the stop
300 # parameter
300 # parameter
def revlist(ui, repo, *revs, **opts):
    """print revisions"""
    # with --header, revtree emits the full commit text for each rev
    if opts['header']:
        full = "commit"
    else:
        full = None
    revtree(ui, list(revs), repo, full, opts['max_count'], opts['parents'])
309
309
def config(ui, repo, **opts):
    """print extension options"""
    # emit each option as a key/value pair, one field per line,
    # in the k=/v= format hgk expects
    value = ui.config('hgk', 'vdiff', '')
    ui.write('k=%s\nv=%s\n' % ('vdiff', value))
316
316
317
317
def view(ui, repo, *etc, **opts):
    "start interactive history viewer"
    # hgk expects to run from the repository root
    os.chdir(repo.root)
    # forward every set option to hgk on its command line
    flags = []
    for k, v in opts.iteritems():
        if v:
            flags.append('--%s %s' % (k, v))
    cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (' '.join(flags),
                                                        " ".join(etc))
    ui.debug(_("running %s\n") % cmd)
    util.system(cmd)
325
325
# Command table: maps command name to (function, option list, synopsis).
# A leading "^" marks the command for the short help listing; the
# "debug-*" entries emulate git plumbing commands for hgk's benefit.
cmdtable = {
    "^view":
        (view,
         [('l', 'limit', '', _('limit number of changes displayed'))],
         _('hg view [-l LIMIT] [REVRANGE]')),
    "debug-diff-tree":
        (difftree,
         [('p', 'patch', None, _('generate patch')),
          ('r', 'recursive', None, _('recursive')),
          ('P', 'pretty', None, _('pretty')),
          ('s', 'stdin', None, _('stdin')),
          ('C', 'copy', None, _('detect copies')),
          ('S', 'search', "", _('search'))],
         _('hg git-diff-tree [OPTION]... NODE1 NODE2 [FILE]...')),
    "debug-cat-file":
        (catfile,
         [('s', 'stdin', None, _('stdin'))],
         _('hg debug-cat-file [OPTION]... TYPE FILE')),
    "debug-config":
        (config, [], _('hg debug-config')),
    "debug-merge-base":
        (base, [], _('hg debug-merge-base node node')),
    "debug-rev-parse":
        (revparse,
         [('', 'default', '', _('ignored'))],
         _('hg debug-rev-parse REV')),
    "debug-rev-list":
        (revlist,
         [('H', 'header', None, _('header')),
          ('t', 'topo-order', None, _('topo-order')),
          ('p', 'parents', None, _('parents')),
          ('n', 'max-count', 0, _('max-count'))],
         _('hg debug-rev-list [options] revs')),
}
@@ -1,2637 +1,2637 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 '''patch management and development
8 '''patch management and development
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details):
17 Common tasks (use "hg help command" for more details):
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25 print name of top applied patch qtop
25 print name of top applied patch qtop
26
26
27 add known patch to applied stack qpush
27 add known patch to applied stack qpush
28 remove patch from applied stack qpop
28 remove patch from applied stack qpop
29 refresh contents of top applied patch qrefresh
29 refresh contents of top applied patch qrefresh
30 '''
30 '''
31
31
32 from mercurial.i18n import _
32 from mercurial.i18n import _
33 from mercurial.node import bin, hex, short, nullid, nullrev
33 from mercurial.node import bin, hex, short, nullid, nullrev
34 from mercurial.lock import release
34 from mercurial.lock import release
35 from mercurial import commands, cmdutil, hg, patch, util
35 from mercurial import commands, cmdutil, hg, patch, util
36 from mercurial import repair, extensions, url, error
36 from mercurial import repair, extensions, url, error
37 import os, sys, re, errno
37 import os, sys, re, errno
38
38
39 commands.norepo += " qclone"
39 commands.norepo += " qclone"
40
40
# Patch names look like unix file names.
# They must be joinable with the queue directory and result in the patch path.
43 normname = util.normpath
43 normname = util.normpath
44
44
class statusentry:
    """One entry of the mq status file, i.e. "<rev>:<patchname>"."""

    def __init__(self, rev, name=None):
        if name:
            # explicit pair supplied by the caller
            self.rev, self.name = rev, name
        else:
            # parse a "rev:name" status-file line; split only on the
            # first ':' so patch names may contain colons
            fields = rev.split(':', 1)
            if len(fields) == 2:
                self.rev, self.name = fields
            else:
                # malformed line: leave both parts unset
                self.rev = self.name = None

    def __str__(self):
        # render back to the status-file format
        return self.rev + ':' + self.name
58
58
class patchheader(object):
    """In-memory representation of a patch file's header.

    message  - list of lines making up the commit message
    comments - all header lines (message plus tags such as From:/# User)
    user     - author string, or None if the patch records none
    date     - date string, or None
    haspatch - True if the file contained an actual diff after the header
    """
    def __init__(self, message, comments, user, date, haspatch):
        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.haspatch = haspatch

    def setuser(self, user):
        # Rewrite an existing 'From: '/'# User ' line if present;
        # otherwise insert one, preferring the position right after the
        # '# HG changeset patch' marker.
        if not self.setheader(['From: ', '# User '], user):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1,'# User ' + user)
            except ValueError:
                # no hg header at all: fall back to an email-style From:
                self.comments = ['From: ' + user, ''] + self.comments
        self.user = user

    def setdate(self, date):
        # Only records the new date if an existing '# Date ' line was
        # rewritten; no header line is added when none is present.
        if self.setheader(['# Date '], date):
            self.date = date

    def setmessage(self, message):
        # Replace the old message (if any) with the single new message
        # line, keeping the remaining header comments.
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def setheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        If none found, add it email style.'''
        # NOTE(review): despite the docstring's last sentence, this only
        # rewrites existing lines (first match per prefix) and reports
        # whether any was found; insertion is handled by callers such as
        # setuser.
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def __str__(self):
        # Render the header back to text; an empty header renders as ''.
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    # drop the subject line and the following blank line
                    # from the message as well
                    self.message = self.message[2:]
                    break
        ci = 0
        # Delete each comment line that belongs to the message, scanning
        # forward so identical lines earlier in the header are preserved.
        for mi in xrange(len(self.message)):
            while self.message[mi] != self.comments[ci]:
                ci += 1
            del self.comments[ci]
119
119
120 class queue:
120 class queue:
    def __init__(self, ui, path, patchdir=None):
        # path is the repository's .hg directory; patches live in
        # .hg/patches unless an explicit patchdir is given.
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)
        self.ui = ui
        # dirty flags: which state files save_dirty() must rewrite
        self.applied_dirty = 0
        self.series_dirty = 0
        # names of the queue's state files, relative to self.path
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None  # lazily loaded by active()
        self.guards_dirty = False
        self._diffopts = None  # lazily built by diffopts()
134
134
    @util.propertycache
    def applied(self):
        # List of statusentry objects for the currently applied patches,
        # read from the status file; empty when no patch is applied.
        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            return [statusentry(l) for l in lines]
        return []
141
141
    @util.propertycache
    def full_series(self):
        # Raw lines of the series file, including comments and guard
        # annotations; empty when the file does not exist.
        if os.path.exists(self.join(self.series_path)):
            return self.opener(self.series_path).read().splitlines()
        return []
147
147
    @util.propertycache
    def series(self):
        # Patch names from the series file, comments/guards stripped.
        # parse_series assigns self.series, shadowing this propertycache.
        self.parse_series()
        return self.series
152
152
    @util.propertycache
    def series_guards(self):
        # Per-patch guard lists, parallel to self.series.
        # parse_series assigns self.series_guards, shadowing this cache.
        self.parse_series()
        return self.series_guards
157
157
    def invalidate(self):
        # Drop all cached/dirty state so it is re-read from disk on
        # next use; pending unsaved changes are discarded.
        for a in 'applied full_series series series_guards'.split():
            if a in self.__dict__:
                delattr(self, a)
        self.applied_dirty = 0
        self.series_dirty = 0
        self.guards_dirty = False
        self.active_guards = None
166
166
    def diffopts(self):
        # Return (and cache) the diff options configured for this ui.
        if self._diffopts is None:
            self._diffopts = patch.diffopts(self.ui)
        return self._diffopts
171
171
    def join(self, *p):
        # Resolve a path relative to the patch directory.
        return os.path.join(self.path, *p)
174
174
    def find_series(self, patch):
        # Return the index of patch in the series, or None if absent.
        # Trailing comments ('#...') and surrounding whitespace are
        # ignored when comparing names.
        pre = re.compile("(\s*)([^#]+)")
        index = 0
        for l in self.full_series:
            m = pre.match(l)
            if m:
                s = m.group(2)
                s = s.rstrip()
                if s == patch:
                    return index
                # only lines carrying a patch name advance the index
                index += 1
        return None
187
187
    # Matches one guard annotation in a series line: optional leading
    # space, '#', then '+' or '-' followed by the guard name.
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
189
189
    def parse_series(self):
        # Split full_series into self.series (patch names) and
        # self.series_guards (per-patch guard lists), aborting on
        # duplicate patch names.
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                # whole line is a comment
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))
210
210
    def check_guard(self, guard):
        # Return an error message if guard is not a valid guard name,
        # otherwise fall through returning None.
        if not guard:
            return _('guard cannot be an empty string')
        bad_chars = '# \t\r\n\f'
        first = guard[0]
        if first in '-+':
            # '+'/'-' prefixes are reserved for the series-file syntax
            return (_('guard %r starts with invalid character: %r') %
                    (guard, first))
        for c in bad_chars:
            if c in guard:
                return _('invalid character in guard %r: %r') % (guard, c)
222
222
    def set_active(self, guards):
        # Validate and set the active guard list (deduplicated, sorted);
        # marks the guards file dirty so save_dirty() writes it back.
        for guard in guards:
            bad = self.check_guard(guard)
            if bad:
                raise util.Abort(bad)
        guards = sorted(set(guards))
        self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
        self.active_guards = guards
        self.guards_dirty = True
232
232
    def active(self):
        # Return the active guard list, loading it from the guards file
        # on first use; invalid entries are warned about and skipped.
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                if err.errno != errno.ENOENT: raise
                # a missing guards file simply means no active guards
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
249
249
    def set_guards(self, idx, guards):
        # Replace the guard annotations of series entry idx; each guard
        # must be '+name' or '-name' with a valid name.
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.check_guard(g[1:])
            if bad:
                raise util.Abort(bad)
        # strip the old guard annotations, then append the new ones
        drop = self.guard_re.sub('', self.full_series[idx])
        self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
        self.parse_series()
        self.series_dirty = True
263
263
    def pushable(self, idx):
        # Decide whether series entry idx (index or name) may be pushed.
        # Returns (pushable, why): why is None for an unguarded patch,
        # the deciding guard when one matched, the positive guard list
        # when none matched, or '' when only negatives exist and none hit.
        if isinstance(idx, str):
            idx = self.series.index(idx)
        patchguards = self.series_guards[idx]
        if not patchguards:
            return True, None
        guards = self.active()
        # a matching negative guard always blocks the patch
        exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
        if exactneg:
            return False, exactneg[0]
        # when positive guards are present, at least one must be active
        pos = [g for g in patchguards if g[0] == '+']
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, exactpos[0]
            return False, pos
        return True, ''
281
281
    def explain_pushable(self, idx, all_patches=False):
        # Tell the user why series entry idx is or is not pushable.
        # Skipped patches go to stderr (warn); with all_patches set,
        # allowed patches are reported too, on stdout (write).
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
306
306
    def save_dirty(self):
        # Write back whichever of the status/series/guards files was
        # modified in memory since the last load.
        def write_list(items, path):
            fp = self.opener(path, 'w')
            for i in items:
                fp.write("%s\n" % i)
            fp.close()
        if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
        if self.series_dirty: write_list(self.full_series, self.series_path)
        if self.guards_dirty: write_list(self.active_guards, self.guards_path)
316
316
    def readheaders(self, patch):
        # Parse the named patch file's header and return a patchheader.
        # Understands both hg export headers ('# HG changeset patch')
        # and email-style tags (From:/Subject:).

        # strip trailing diff-introducer lines off a line list in place
        def eatdiff(lines):
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        # strip trailing blank lines off a line list in place
        def eatempty(lines):
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = self.join(patch)
        message = []
        comments = []
        user = None
        date = None
        format = None
        subject = None
        # diffstart: 0 = no diff seen, 1 = saw '--- ', 2 = diff confirmed
        diffstart = 0

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                # '--- ' followed by '+++ ' confirms a unified diff;
                # either way the header ends here
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return patchheader(message, comments, user, date, diffstart > 1)
393
393
    def removeundo(self, repo):
        # Delete the repository's undo file so 'hg rollback' cannot
        # cross an mq operation; failure only produces a warning.
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn(_('error removing undo: %s\n') % str(inst))
402
402
    def printdiff(self, repo, node1, node2=None, files=None,
                  fp=None, changes=None, opts={}):
        # Write the diff between node1 and node2 (working dir if None)
        # to fp, or to the ui when fp is None.
        m = cmdutil.match(repo, files, opts)
        chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
        write = fp is None and repo.ui.write or fp.write
        for chunk in chunks:
            write(chunk)
410
410
    def mergeone(self, repo, mergeq, head, patch, rev):
        # Apply one patch on top of head, falling back to a real merge
        # with rev when a plain apply fails; regenerates the patch file
        # from the merge result. Returns (err, node).

        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, n, update=False, backup='strip')

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        # commit the merge result, reusing the original description/user
        n = repo.commit(None, ctx.description(), ctx.user(), force=1)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        try:
            ph = mergeq.readheaders(patch)
        except:
            raise util.Abort(_("unable to read %s") % patch)

        # rewrite the patch file: original header plus the merged diff
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
448
448
449 def qparents(self, repo, rev=None):
449 def qparents(self, repo, rev=None):
450 if rev is None:
450 if rev is None:
451 (p1, p2) = repo.dirstate.parents()
451 (p1, p2) = repo.dirstate.parents()
452 if p2 == nullid:
452 if p2 == nullid:
453 return p1
453 return p1
454 if len(self.applied) == 0:
454 if len(self.applied) == 0:
455 return None
455 return None
456 return bin(self.applied[-1].rev)
456 return bin(self.applied[-1].rev)
457 pp = repo.changelog.parents(rev)
457 pp = repo.changelog.parents(rev)
458 if pp[1] != nullid:
458 if pp[1] != nullid:
459 arevs = [ x.rev for x in self.applied ]
459 arevs = [ x.rev for x in self.applied ]
460 p0 = hex(pp[0])
460 p0 = hex(pp[0])
461 p1 = hex(pp[1])
461 p1 = hex(pp[1])
462 if p0 in arevs:
462 if p0 in arevs:
463 return pp[0]
463 return pp[0]
464 if p1 in arevs:
464 if p1 in arevs:
465 return pp[1]
465 return pp[1]
466 return pp[0]
466 return pp[0]
467
467
468 def mergepatch(self, repo, mergeq, series):
468 def mergepatch(self, repo, mergeq, series):
469 if len(self.applied) == 0:
469 if len(self.applied) == 0:
470 # each of the patches merged in will have two parents. This
470 # each of the patches merged in will have two parents. This
471 # can confuse the qrefresh, qdiff, and strip code because it
471 # can confuse the qrefresh, qdiff, and strip code because it
472 # needs to know which parent is actually in the patch queue.
472 # needs to know which parent is actually in the patch queue.
473 # so, we insert a merge marker with only one parent. This way
473 # so, we insert a merge marker with only one parent. This way
474 # the first patch in the queue is never a merge patch
474 # the first patch in the queue is never a merge patch
475 #
475 #
476 pname = ".hg.patches.merge.marker"
476 pname = ".hg.patches.merge.marker"
477 n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
477 n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
478 self.removeundo(repo)
478 self.removeundo(repo)
479 self.applied.append(statusentry(hex(n), pname))
479 self.applied.append(statusentry(hex(n), pname))
480 self.applied_dirty = 1
480 self.applied_dirty = 1
481
481
482 head = self.qparents(repo)
482 head = self.qparents(repo)
483
483
484 for patch in series:
484 for patch in series:
485 patch = mergeq.lookup(patch, strict=True)
485 patch = mergeq.lookup(patch, strict=True)
486 if not patch:
486 if not patch:
487 self.ui.warn(_("patch %s does not exist\n") % patch)
487 self.ui.warn(_("patch %s does not exist\n") % patch)
488 return (1, None)
488 return (1, None)
489 pushable, reason = self.pushable(patch)
489 pushable, reason = self.pushable(patch)
490 if not pushable:
490 if not pushable:
491 self.explain_pushable(patch, all_patches=True)
491 self.explain_pushable(patch, all_patches=True)
492 continue
492 continue
493 info = mergeq.isapplied(patch)
493 info = mergeq.isapplied(patch)
494 if not info:
494 if not info:
495 self.ui.warn(_("patch %s is not applied\n") % patch)
495 self.ui.warn(_("patch %s is not applied\n") % patch)
496 return (1, None)
496 return (1, None)
497 rev = bin(info[1])
497 rev = bin(info[1])
498 (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
498 (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
499 if head:
499 if head:
500 self.applied.append(statusentry(hex(head), patch))
500 self.applied.append(statusentry(hex(head), patch))
501 self.applied_dirty = 1
501 self.applied_dirty = 1
502 if err:
502 if err:
503 return (err, head)
503 return (err, head)
504 self.save_dirty()
504 self.save_dirty()
505 return (0, head)
505 return (0, head)
506
506
507 def patch(self, repo, patchfile):
507 def patch(self, repo, patchfile):
508 '''Apply patchfile to the working directory.
508 '''Apply patchfile to the working directory.
509 patchfile: file name of patch'''
509 patchfile: file name of patch'''
510 files = {}
510 files = {}
511 try:
511 try:
512 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
512 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
513 files=files)
513 files=files)
514 except Exception, inst:
514 except Exception, inst:
515 self.ui.note(str(inst) + '\n')
515 self.ui.note(str(inst) + '\n')
516 if not self.ui.verbose:
516 if not self.ui.verbose:
517 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
517 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
518 return (False, files, False)
518 return (False, files, False)
519
519
520 return (True, files, fuzz)
520 return (True, files, fuzz)
521
521
522 def apply(self, repo, series, list=False, update_status=True,
522 def apply(self, repo, series, list=False, update_status=True,
523 strict=False, patchdir=None, merge=None, all_files={}):
523 strict=False, patchdir=None, merge=None, all_files={}):
524 wlock = lock = tr = None
524 wlock = lock = tr = None
525 try:
525 try:
526 wlock = repo.wlock()
526 wlock = repo.wlock()
527 lock = repo.lock()
527 lock = repo.lock()
528 tr = repo.transaction()
528 tr = repo.transaction()
529 try:
529 try:
530 ret = self._apply(repo, series, list, update_status,
530 ret = self._apply(repo, series, list, update_status,
531 strict, patchdir, merge, all_files=all_files)
531 strict, patchdir, merge, all_files=all_files)
532 tr.close()
532 tr.close()
533 self.save_dirty()
533 self.save_dirty()
534 return ret
534 return ret
535 except:
535 except:
536 try:
536 try:
537 tr.abort()
537 tr.abort()
538 finally:
538 finally:
539 repo.invalidate()
539 repo.invalidate()
540 repo.dirstate.invalidate()
540 repo.dirstate.invalidate()
541 raise
541 raise
542 finally:
542 finally:
543 del tr
543 del tr
544 release(lock, wlock)
544 release(lock, wlock)
545 self.removeundo(repo)
545 self.removeundo(repo)
546
546
547 def _apply(self, repo, series, list=False, update_status=True,
547 def _apply(self, repo, series, list=False, update_status=True,
548 strict=False, patchdir=None, merge=None, all_files={}):
548 strict=False, patchdir=None, merge=None, all_files={}):
549 # TODO unify with commands.py
549 # TODO unify with commands.py
550 if not patchdir:
550 if not patchdir:
551 patchdir = self.path
551 patchdir = self.path
552 err = 0
552 err = 0
553 n = None
553 n = None
554 for patchname in series:
554 for patchname in series:
555 pushable, reason = self.pushable(patchname)
555 pushable, reason = self.pushable(patchname)
556 if not pushable:
556 if not pushable:
557 self.explain_pushable(patchname, all_patches=True)
557 self.explain_pushable(patchname, all_patches=True)
558 continue
558 continue
559 self.ui.warn(_("applying %s\n") % patchname)
559 self.ui.warn(_("applying %s\n") % patchname)
560 pf = os.path.join(patchdir, patchname)
560 pf = os.path.join(patchdir, patchname)
561
561
562 try:
562 try:
563 ph = self.readheaders(patchname)
563 ph = self.readheaders(patchname)
564 except:
564 except:
565 self.ui.warn(_("Unable to read %s\n") % patchname)
565 self.ui.warn(_("Unable to read %s\n") % patchname)
566 err = 1
566 err = 1
567 break
567 break
568
568
569 message = ph.message
569 message = ph.message
570 if not message:
570 if not message:
571 message = _("imported patch %s\n") % patchname
571 message = _("imported patch %s\n") % patchname
572 else:
572 else:
573 if list:
573 if list:
574 message.append(_("\nimported patch %s") % patchname)
574 message.append(_("\nimported patch %s") % patchname)
575 message = '\n'.join(message)
575 message = '\n'.join(message)
576
576
577 if ph.haspatch:
577 if ph.haspatch:
578 (patcherr, files, fuzz) = self.patch(repo, pf)
578 (patcherr, files, fuzz) = self.patch(repo, pf)
579 all_files.update(files)
579 all_files.update(files)
580 patcherr = not patcherr
580 patcherr = not patcherr
581 else:
581 else:
582 self.ui.warn(_("patch %s is empty\n") % patchname)
582 self.ui.warn(_("patch %s is empty\n") % patchname)
583 patcherr, files, fuzz = 0, [], 0
583 patcherr, files, fuzz = 0, [], 0
584
584
585 if merge and files:
585 if merge and files:
586 # Mark as removed/merged and update dirstate parent info
586 # Mark as removed/merged and update dirstate parent info
587 removed = []
587 removed = []
588 merged = []
588 merged = []
589 for f in files:
589 for f in files:
590 if os.path.exists(repo.wjoin(f)):
590 if os.path.exists(repo.wjoin(f)):
591 merged.append(f)
591 merged.append(f)
592 else:
592 else:
593 removed.append(f)
593 removed.append(f)
594 for f in removed:
594 for f in removed:
595 repo.dirstate.remove(f)
595 repo.dirstate.remove(f)
596 for f in merged:
596 for f in merged:
597 repo.dirstate.merge(f)
597 repo.dirstate.merge(f)
598 p1, p2 = repo.dirstate.parents()
598 p1, p2 = repo.dirstate.parents()
599 repo.dirstate.setparents(p1, merge)
599 repo.dirstate.setparents(p1, merge)
600
600
601 files = patch.updatedir(self.ui, repo, files)
601 files = patch.updatedir(self.ui, repo, files)
602 match = cmdutil.matchfiles(repo, files or [])
602 match = cmdutil.matchfiles(repo, files or [])
603 n = repo.commit(files, message, ph.user, ph.date, match=match,
603 n = repo.commit(files, message, ph.user, ph.date, match=match,
604 force=True)
604 force=True)
605
605
606 if n is None:
606 if n is None:
607 raise util.Abort(_("repo commit failed"))
607 raise util.Abort(_("repo commit failed"))
608
608
609 if update_status:
609 if update_status:
610 self.applied.append(statusentry(hex(n), patchname))
610 self.applied.append(statusentry(hex(n), patchname))
611
611
612 if patcherr:
612 if patcherr:
613 self.ui.warn(_("patch failed, rejects left in working dir\n"))
613 self.ui.warn(_("patch failed, rejects left in working dir\n"))
614 err = 1
614 err = 1
615 break
615 break
616
616
617 if fuzz and strict:
617 if fuzz and strict:
618 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
618 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
619 err = 1
619 err = 1
620 break
620 break
621 return (err, n)
621 return (err, n)
622
622
623 def _clean_series(self, patches):
623 def _clean_series(self, patches):
624 for i in sorted([self.find_series(p) for p in patches], reverse=True):
624 for i in sorted([self.find_series(p) for p in patches], reverse=True):
625 del self.full_series[i]
625 del self.full_series[i]
626 self.parse_series()
626 self.parse_series()
627 self.series_dirty = 1
627 self.series_dirty = 1
628
628
629 def finish(self, repo, revs):
629 def finish(self, repo, revs):
630 firstrev = repo[self.applied[0].rev].rev()
630 firstrev = repo[self.applied[0].rev].rev()
631 appliedbase = 0
631 appliedbase = 0
632 patches = []
632 patches = []
633 for rev in sorted(revs):
633 for rev in sorted(revs):
634 if rev < firstrev:
634 if rev < firstrev:
635 raise util.Abort(_('revision %d is not managed') % rev)
635 raise util.Abort(_('revision %d is not managed') % rev)
636 base = bin(self.applied[appliedbase].rev)
636 base = bin(self.applied[appliedbase].rev)
637 node = repo.changelog.node(rev)
637 node = repo.changelog.node(rev)
638 if node != base:
638 if node != base:
639 raise util.Abort(_('cannot delete revision %d above '
639 raise util.Abort(_('cannot delete revision %d above '
640 'applied patches') % rev)
640 'applied patches') % rev)
641 patches.append(self.applied[appliedbase].name)
641 patches.append(self.applied[appliedbase].name)
642 appliedbase += 1
642 appliedbase += 1
643
643
644 r = self.qrepo()
644 r = self.qrepo()
645 if r:
645 if r:
646 r.remove(patches, True)
646 r.remove(patches, True)
647 else:
647 else:
648 for p in patches:
648 for p in patches:
649 os.unlink(self.join(p))
649 os.unlink(self.join(p))
650
650
651 del self.applied[:appliedbase]
651 del self.applied[:appliedbase]
652 self.applied_dirty = 1
652 self.applied_dirty = 1
653 self._clean_series(patches)
653 self._clean_series(patches)
654
654
655 def delete(self, repo, patches, opts):
655 def delete(self, repo, patches, opts):
656 if not patches and not opts.get('rev'):
656 if not patches and not opts.get('rev'):
657 raise util.Abort(_('qdelete requires at least one revision or '
657 raise util.Abort(_('qdelete requires at least one revision or '
658 'patch name'))
658 'patch name'))
659
659
660 realpatches = []
660 realpatches = []
661 for patch in patches:
661 for patch in patches:
662 patch = self.lookup(patch, strict=True)
662 patch = self.lookup(patch, strict=True)
663 info = self.isapplied(patch)
663 info = self.isapplied(patch)
664 if info:
664 if info:
665 raise util.Abort(_("cannot delete applied patch %s") % patch)
665 raise util.Abort(_("cannot delete applied patch %s") % patch)
666 if patch not in self.series:
666 if patch not in self.series:
667 raise util.Abort(_("patch %s not in series file") % patch)
667 raise util.Abort(_("patch %s not in series file") % patch)
668 realpatches.append(patch)
668 realpatches.append(patch)
669
669
670 appliedbase = 0
670 appliedbase = 0
671 if opts.get('rev'):
671 if opts.get('rev'):
672 if not self.applied:
672 if not self.applied:
673 raise util.Abort(_('no patches applied'))
673 raise util.Abort(_('no patches applied'))
674 revs = cmdutil.revrange(repo, opts['rev'])
674 revs = cmdutil.revrange(repo, opts['rev'])
675 if len(revs) > 1 and revs[0] > revs[1]:
675 if len(revs) > 1 and revs[0] > revs[1]:
676 revs.reverse()
676 revs.reverse()
677 for rev in revs:
677 for rev in revs:
678 if appliedbase >= len(self.applied):
678 if appliedbase >= len(self.applied):
679 raise util.Abort(_("revision %d is not managed") % rev)
679 raise util.Abort(_("revision %d is not managed") % rev)
680
680
681 base = bin(self.applied[appliedbase].rev)
681 base = bin(self.applied[appliedbase].rev)
682 node = repo.changelog.node(rev)
682 node = repo.changelog.node(rev)
683 if node != base:
683 if node != base:
684 raise util.Abort(_("cannot delete revision %d above "
684 raise util.Abort(_("cannot delete revision %d above "
685 "applied patches") % rev)
685 "applied patches") % rev)
686 realpatches.append(self.applied[appliedbase].name)
686 realpatches.append(self.applied[appliedbase].name)
687 appliedbase += 1
687 appliedbase += 1
688
688
689 if not opts.get('keep'):
689 if not opts.get('keep'):
690 r = self.qrepo()
690 r = self.qrepo()
691 if r:
691 if r:
692 r.remove(realpatches, True)
692 r.remove(realpatches, True)
693 else:
693 else:
694 for p in realpatches:
694 for p in realpatches:
695 os.unlink(self.join(p))
695 os.unlink(self.join(p))
696
696
697 if appliedbase:
697 if appliedbase:
698 del self.applied[:appliedbase]
698 del self.applied[:appliedbase]
699 self.applied_dirty = 1
699 self.applied_dirty = 1
700 self._clean_series(realpatches)
700 self._clean_series(realpatches)
701
701
702 def check_toppatch(self, repo):
702 def check_toppatch(self, repo):
703 if len(self.applied) > 0:
703 if len(self.applied) > 0:
704 top = bin(self.applied[-1].rev)
704 top = bin(self.applied[-1].rev)
705 pp = repo.dirstate.parents()
705 pp = repo.dirstate.parents()
706 if top not in pp:
706 if top not in pp:
707 raise util.Abort(_("working directory revision is not qtip"))
707 raise util.Abort(_("working directory revision is not qtip"))
708 return top
708 return top
709 return None
709 return None
710 def check_localchanges(self, repo, force=False, refresh=True):
710 def check_localchanges(self, repo, force=False, refresh=True):
711 m, a, r, d = repo.status()[:4]
711 m, a, r, d = repo.status()[:4]
712 if m or a or r or d:
712 if m or a or r or d:
713 if not force:
713 if not force:
714 if refresh:
714 if refresh:
715 raise util.Abort(_("local changes found, refresh first"))
715 raise util.Abort(_("local changes found, refresh first"))
716 else:
716 else:
717 raise util.Abort(_("local changes found"))
717 raise util.Abort(_("local changes found"))
718 return m, a, r, d
718 return m, a, r, d
719
719
720 _reserved = ('series', 'status', 'guards')
720 _reserved = ('series', 'status', 'guards')
721 def check_reserved_name(self, name):
721 def check_reserved_name(self, name):
722 if (name in self._reserved or name.startswith('.hg')
722 if (name in self._reserved or name.startswith('.hg')
723 or name.startswith('.mq')):
723 or name.startswith('.mq')):
724 raise util.Abort(_('"%s" cannot be used as the name of a patch')
724 raise util.Abort(_('"%s" cannot be used as the name of a patch')
725 % name)
725 % name)
726
726
727 def new(self, repo, patchfn, *pats, **opts):
727 def new(self, repo, patchfn, *pats, **opts):
728 """options:
728 """options:
729 msg: a string or a no-argument function returning a string
729 msg: a string or a no-argument function returning a string
730 """
730 """
731 msg = opts.get('msg')
731 msg = opts.get('msg')
732 force = opts.get('force')
732 force = opts.get('force')
733 user = opts.get('user')
733 user = opts.get('user')
734 date = opts.get('date')
734 date = opts.get('date')
735 if date:
735 if date:
736 date = util.parsedate(date)
736 date = util.parsedate(date)
737 self.check_reserved_name(patchfn)
737 self.check_reserved_name(patchfn)
738 if os.path.exists(self.join(patchfn)):
738 if os.path.exists(self.join(patchfn)):
739 raise util.Abort(_('patch "%s" already exists') % patchfn)
739 raise util.Abort(_('patch "%s" already exists') % patchfn)
740 if opts.get('include') or opts.get('exclude') or pats:
740 if opts.get('include') or opts.get('exclude') or pats:
741 match = cmdutil.match(repo, pats, opts)
741 match = cmdutil.match(repo, pats, opts)
742 # detect missing files in pats
742 # detect missing files in pats
743 def badfn(f, msg):
743 def badfn(f, msg):
744 raise util.Abort('%s: %s' % (f, msg))
744 raise util.Abort('%s: %s' % (f, msg))
745 match.bad = badfn
745 match.bad = badfn
746 m, a, r, d = repo.status(match=match)[:4]
746 m, a, r, d = repo.status(match=match)[:4]
747 else:
747 else:
748 m, a, r, d = self.check_localchanges(repo, force)
748 m, a, r, d = self.check_localchanges(repo, force)
749 match = cmdutil.matchfiles(repo, m + a + r)
749 match = cmdutil.matchfiles(repo, m + a + r)
750 commitfiles = m + a + r
750 commitfiles = m + a + r
751 self.check_toppatch(repo)
751 self.check_toppatch(repo)
752 insert = self.full_series_end()
752 insert = self.full_series_end()
753 wlock = repo.wlock()
753 wlock = repo.wlock()
754 try:
754 try:
755 # if patch file write fails, abort early
755 # if patch file write fails, abort early
756 p = self.opener(patchfn, "w")
756 p = self.opener(patchfn, "w")
757 try:
757 try:
758 if date:
758 if date:
759 p.write("# HG changeset patch\n")
759 p.write("# HG changeset patch\n")
760 if user:
760 if user:
761 p.write("# User " + user + "\n")
761 p.write("# User " + user + "\n")
762 p.write("# Date %d %d\n\n" % date)
762 p.write("# Date %d %d\n\n" % date)
763 elif user:
763 elif user:
764 p.write("From: " + user + "\n\n")
764 p.write("From: " + user + "\n\n")
765
765
766 if hasattr(msg, '__call__'):
766 if hasattr(msg, '__call__'):
767 msg = msg()
767 msg = msg()
768 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
768 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
769 n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
769 n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
770 if n is None:
770 if n is None:
771 raise util.Abort(_("repo commit failed"))
771 raise util.Abort(_("repo commit failed"))
772 try:
772 try:
773 self.full_series[insert:insert] = [patchfn]
773 self.full_series[insert:insert] = [patchfn]
774 self.applied.append(statusentry(hex(n), patchfn))
774 self.applied.append(statusentry(hex(n), patchfn))
775 self.parse_series()
775 self.parse_series()
776 self.series_dirty = 1
776 self.series_dirty = 1
777 self.applied_dirty = 1
777 self.applied_dirty = 1
778 if msg:
778 if msg:
779 msg = msg + "\n\n"
779 msg = msg + "\n\n"
780 p.write(msg)
780 p.write(msg)
781 if commitfiles:
781 if commitfiles:
782 diffopts = self.diffopts()
782 diffopts = self.diffopts()
783 if opts.get('git'): diffopts.git = True
783 if opts.get('git'): diffopts.git = True
784 parent = self.qparents(repo, n)
784 parent = self.qparents(repo, n)
785 chunks = patch.diff(repo, node1=parent, node2=n,
785 chunks = patch.diff(repo, node1=parent, node2=n,
786 match=match, opts=diffopts)
786 match=match, opts=diffopts)
787 for chunk in chunks:
787 for chunk in chunks:
788 p.write(chunk)
788 p.write(chunk)
789 p.close()
789 p.close()
790 wlock.release()
790 wlock.release()
791 wlock = None
791 wlock = None
792 r = self.qrepo()
792 r = self.qrepo()
793 if r: r.add([patchfn])
793 if r: r.add([patchfn])
794 except:
794 except:
795 repo.rollback()
795 repo.rollback()
796 raise
796 raise
797 except Exception:
797 except Exception:
798 patchpath = self.join(patchfn)
798 patchpath = self.join(patchfn)
799 try:
799 try:
800 os.unlink(patchpath)
800 os.unlink(patchpath)
801 except:
801 except:
802 self.ui.warn(_('error unlinking %s\n') % patchpath)
802 self.ui.warn(_('error unlinking %s\n') % patchpath)
803 raise
803 raise
804 self.removeundo(repo)
804 self.removeundo(repo)
805 finally:
805 finally:
806 release(wlock)
806 release(wlock)
807
807
808 def strip(self, repo, rev, update=True, backup="all", force=None):
808 def strip(self, repo, rev, update=True, backup="all", force=None):
809 wlock = lock = None
809 wlock = lock = None
810 try:
810 try:
811 wlock = repo.wlock()
811 wlock = repo.wlock()
812 lock = repo.lock()
812 lock = repo.lock()
813
813
814 if update:
814 if update:
815 self.check_localchanges(repo, force=force, refresh=False)
815 self.check_localchanges(repo, force=force, refresh=False)
816 urev = self.qparents(repo, rev)
816 urev = self.qparents(repo, rev)
817 hg.clean(repo, urev)
817 hg.clean(repo, urev)
818 repo.dirstate.write()
818 repo.dirstate.write()
819
819
820 self.removeundo(repo)
820 self.removeundo(repo)
821 repair.strip(self.ui, repo, rev, backup)
821 repair.strip(self.ui, repo, rev, backup)
822 # strip may have unbundled a set of backed up revisions after
822 # strip may have unbundled a set of backed up revisions after
823 # the actual strip
823 # the actual strip
824 self.removeundo(repo)
824 self.removeundo(repo)
825 finally:
825 finally:
826 release(lock, wlock)
826 release(lock, wlock)
827
827
828 def isapplied(self, patch):
828 def isapplied(self, patch):
829 """returns (index, rev, patch)"""
829 """returns (index, rev, patch)"""
830 for i in xrange(len(self.applied)):
830 for i in xrange(len(self.applied)):
831 a = self.applied[i]
831 a = self.applied[i]
832 if a.name == patch:
832 if a.name == patch:
833 return (i, a.rev, a.name)
833 return (i, a.rev, a.name)
834 return None
834 return None
835
835
836 # if the exact patch name does not exist, we try a few
836 # if the exact patch name does not exist, we try a few
837 # variations. If strict is passed, we try only #1
837 # variations. If strict is passed, we try only #1
838 #
838 #
839 # 1) a number to indicate an offset in the series file
839 # 1) a number to indicate an offset in the series file
840 # 2) a unique substring of the patch name was given
840 # 2) a unique substring of the patch name was given
841 # 3) patchname[-+]num to indicate an offset in the series file
841 # 3) patchname[-+]num to indicate an offset in the series file
842 def lookup(self, patch, strict=False):
842 def lookup(self, patch, strict=False):
843 patch = patch and str(patch)
843 patch = patch and str(patch)
844
844
845 def partial_name(s):
845 def partial_name(s):
846 if s in self.series:
846 if s in self.series:
847 return s
847 return s
848 matches = [x for x in self.series if s in x]
848 matches = [x for x in self.series if s in x]
849 if len(matches) > 1:
849 if len(matches) > 1:
850 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
850 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
851 for m in matches:
851 for m in matches:
852 self.ui.warn(' %s\n' % m)
852 self.ui.warn(' %s\n' % m)
853 return None
853 return None
854 if matches:
854 if matches:
855 return matches[0]
855 return matches[0]
856 if len(self.series) > 0 and len(self.applied) > 0:
856 if len(self.series) > 0 and len(self.applied) > 0:
857 if s == 'qtip':
857 if s == 'qtip':
858 return self.series[self.series_end(True)-1]
858 return self.series[self.series_end(True)-1]
859 if s == 'qbase':
859 if s == 'qbase':
860 return self.series[0]
860 return self.series[0]
861 return None
861 return None
862
862
863 if patch is None:
863 if patch is None:
864 return None
864 return None
865 if patch in self.series:
865 if patch in self.series:
866 return patch
866 return patch
867
867
868 if not os.path.isfile(self.join(patch)):
868 if not os.path.isfile(self.join(patch)):
869 try:
869 try:
870 sno = int(patch)
870 sno = int(patch)
871 except(ValueError, OverflowError):
871 except(ValueError, OverflowError):
872 pass
872 pass
873 else:
873 else:
874 if -len(self.series) <= sno < len(self.series):
874 if -len(self.series) <= sno < len(self.series):
875 return self.series[sno]
875 return self.series[sno]
876
876
877 if not strict:
877 if not strict:
878 res = partial_name(patch)
878 res = partial_name(patch)
879 if res:
879 if res:
880 return res
880 return res
881 minus = patch.rfind('-')
881 minus = patch.rfind('-')
882 if minus >= 0:
882 if minus >= 0:
883 res = partial_name(patch[:minus])
883 res = partial_name(patch[:minus])
884 if res:
884 if res:
885 i = self.series.index(res)
885 i = self.series.index(res)
886 try:
886 try:
887 off = int(patch[minus+1:] or 1)
887 off = int(patch[minus+1:] or 1)
888 except(ValueError, OverflowError):
888 except(ValueError, OverflowError):
889 pass
889 pass
890 else:
890 else:
891 if i - off >= 0:
891 if i - off >= 0:
892 return self.series[i - off]
892 return self.series[i - off]
893 plus = patch.rfind('+')
893 plus = patch.rfind('+')
894 if plus >= 0:
894 if plus >= 0:
895 res = partial_name(patch[:plus])
895 res = partial_name(patch[:plus])
896 if res:
896 if res:
897 i = self.series.index(res)
897 i = self.series.index(res)
898 try:
898 try:
899 off = int(patch[plus+1:] or 1)
899 off = int(patch[plus+1:] or 1)
900 except(ValueError, OverflowError):
900 except(ValueError, OverflowError):
901 pass
901 pass
902 else:
902 else:
903 if i + off < len(self.series):
903 if i + off < len(self.series):
904 return self.series[i + off]
904 return self.series[i + off]
905 raise util.Abort(_("patch %s not in series") % patch)
905 raise util.Abort(_("patch %s not in series") % patch)
906
906
907 def push(self, repo, patch=None, force=False, list=False,
907 def push(self, repo, patch=None, force=False, list=False,
908 mergeq=None, all=False):
908 mergeq=None, all=False):
909 wlock = repo.wlock()
909 wlock = repo.wlock()
910 if repo.dirstate.parents()[0] not in repo.heads():
910 if repo.dirstate.parents()[0] not in repo.heads():
911 self.ui.status(_("(working directory not at a head)\n"))
911 self.ui.status(_("(working directory not at a head)\n"))
912
912
913 if not self.series:
913 if not self.series:
914 self.ui.warn(_('no patches in series\n'))
914 self.ui.warn(_('no patches in series\n'))
915 return 0
915 return 0
916
916
917 try:
917 try:
918 patch = self.lookup(patch)
918 patch = self.lookup(patch)
919 # Suppose our series file is: A B C and the current 'top'
919 # Suppose our series file is: A B C and the current 'top'
920 # patch is B. qpush C should be performed (moving forward)
920 # patch is B. qpush C should be performed (moving forward)
921 # qpush B is a NOP (no change) qpush A is an error (can't
921 # qpush B is a NOP (no change) qpush A is an error (can't
922 # go backwards with qpush)
922 # go backwards with qpush)
923 if patch:
923 if patch:
924 info = self.isapplied(patch)
924 info = self.isapplied(patch)
925 if info:
925 if info:
926 if info[0] < len(self.applied) - 1:
926 if info[0] < len(self.applied) - 1:
927 raise util.Abort(
927 raise util.Abort(
928 _("cannot push to a previous patch: %s") % patch)
928 _("cannot push to a previous patch: %s") % patch)
929 self.ui.warn(
929 self.ui.warn(
930 _('qpush: %s is already at the top\n') % patch)
930 _('qpush: %s is already at the top\n') % patch)
931 return
931 return
932 pushable, reason = self.pushable(patch)
932 pushable, reason = self.pushable(patch)
933 if not pushable:
933 if not pushable:
934 if reason:
934 if reason:
935 reason = _('guarded by %r') % reason
935 reason = _('guarded by %r') % reason
936 else:
936 else:
937 reason = _('no matching guards')
937 reason = _('no matching guards')
938 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
938 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
939 return 1
939 return 1
940 elif all:
940 elif all:
941 patch = self.series[-1]
941 patch = self.series[-1]
942 if self.isapplied(patch):
942 if self.isapplied(patch):
943 self.ui.warn(_('all patches are currently applied\n'))
943 self.ui.warn(_('all patches are currently applied\n'))
944 return 0
944 return 0
945
945
946 # Following the above example, starting at 'top' of B:
946 # Following the above example, starting at 'top' of B:
947 # qpush should be performed (pushes C), but a subsequent
947 # qpush should be performed (pushes C), but a subsequent
948 # qpush without an argument is an error (nothing to
948 # qpush without an argument is an error (nothing to
949 # apply). This allows a loop of "...while hg qpush..." to
949 # apply). This allows a loop of "...while hg qpush..." to
950 # work as it detects an error when done
950 # work as it detects an error when done
951 start = self.series_end()
951 start = self.series_end()
952 if start == len(self.series):
952 if start == len(self.series):
953 self.ui.warn(_('patch series already fully applied\n'))
953 self.ui.warn(_('patch series already fully applied\n'))
954 return 1
954 return 1
955 if not force:
955 if not force:
956 self.check_localchanges(repo)
956 self.check_localchanges(repo)
957
957
958 self.applied_dirty = 1
958 self.applied_dirty = 1
959 if start > 0:
959 if start > 0:
960 self.check_toppatch(repo)
960 self.check_toppatch(repo)
961 if not patch:
961 if not patch:
962 patch = self.series[start]
962 patch = self.series[start]
963 end = start + 1
963 end = start + 1
964 else:
964 else:
965 end = self.series.index(patch, start) + 1
965 end = self.series.index(patch, start) + 1
966 s = self.series[start:end]
966 s = self.series[start:end]
967 all_files = {}
967 all_files = {}
968 try:
968 try:
969 if mergeq:
969 if mergeq:
970 ret = self.mergepatch(repo, mergeq, s)
970 ret = self.mergepatch(repo, mergeq, s)
971 else:
971 else:
972 ret = self.apply(repo, s, list, all_files=all_files)
972 ret = self.apply(repo, s, list, all_files=all_files)
973 except:
973 except:
974 self.ui.warn(_('cleaning up working directory...'))
974 self.ui.warn(_('cleaning up working directory...'))
975 node = repo.dirstate.parents()[0]
975 node = repo.dirstate.parents()[0]
976 hg.revert(repo, node, None)
976 hg.revert(repo, node, None)
977 unknown = repo.status(unknown=True)[4]
977 unknown = repo.status(unknown=True)[4]
978 # only remove unknown files that we know we touched or
978 # only remove unknown files that we know we touched or
979 # created while patching
979 # created while patching
980 for f in unknown:
980 for f in unknown:
981 if f in all_files:
981 if f in all_files:
982 util.unlink(repo.wjoin(f))
982 util.unlink(repo.wjoin(f))
983 self.ui.warn(_('done\n'))
983 self.ui.warn(_('done\n'))
984 raise
984 raise
985 top = self.applied[-1].name
985 top = self.applied[-1].name
986 if ret[0]:
986 if ret[0]:
987 self.ui.write(_("errors during apply, please fix and "
987 self.ui.write(_("errors during apply, please fix and "
988 "refresh %s\n") % top)
988 "refresh %s\n") % top)
989 else:
989 else:
990 self.ui.write(_("now at: %s\n") % top)
990 self.ui.write(_("now at: %s\n") % top)
991 return ret[0]
991 return ret[0]
992 finally:
992 finally:
993 wlock.release()
993 wlock.release()
994
994
    def pop(self, repo, patch=None, force=False, update=True, all=False):
        """Unapply patches from the top of the applied stack.

        If `patch` is given, pop everything applied after it so that it
        becomes the new top; with `all`, pop every applied patch;
        otherwise pop just the topmost one.  `force` skips the local
        changes check.  `update=False` asks to leave the working
        directory alone, but is overridden if a dirstate parent is one
        of the revisions being popped.
        """
        def getfile(f, rev, flags):
            # restore file f in the working dir to filelog node `rev`
            t = repo.file(f).read(rev)
            repo.wwrite(f, t, flags)

        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if len(self.applied) == 0:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # even if no update was requested, we must update when a
                # dirstate parent is among the patches being popped
                parents = repo.dirstate.parents()
                rr = [ bin(x.rev) for x in self.applied ]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # conversely, skip the update when no dirstate parent is
                # actually being popped
                parents = [p.hex() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.rev in parents:
                        needupdate = True
                        break
                update = needupdate

            if not force and update:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            end = len(self.applied)
            rev = bin(self.applied[start].rev)
            if update:
                top = self.check_toppatch(repo)

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise util.Abort(_('trying to pop unknown node %s') % node)

            # refuse to strip revisions that mq does not exclusively manage
            if heads != [bin(self.applied[-1].rev)]:
                raise util.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                changes = repo.changelog.read(qp)
                mmap = repo.manifest.read(changes[0])
                m, a, r, d = repo.status(qp, top)[:4]
                if d:
                    raise util.Abort(_("deletions found between repo revs"))
                for f in m:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in r:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in m + r:
                    repo.dirstate.normal(f)
                for f in a:
                    # files added by the popped patches vanish; also prune
                    # any directories emptied as a result (best effort)
                    try:
                        os.unlink(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                    except: pass
                    repo.dirstate.forget(f)
                repo.dirstate.setparents(qp, nullid)
            del self.applied[start:end]
            self.strip(repo, rev, update=False, backup='strip')
            if len(self.applied):
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            wlock.release()
1096
1096
1097 def diff(self, repo, pats, opts):
1097 def diff(self, repo, pats, opts):
1098 top = self.check_toppatch(repo)
1098 top = self.check_toppatch(repo)
1099 if not top:
1099 if not top:
1100 self.ui.write(_("no patches applied\n"))
1100 self.ui.write(_("no patches applied\n"))
1101 return
1101 return
1102 qp = self.qparents(repo, top)
1102 qp = self.qparents(repo, top)
1103 self._diffopts = patch.diffopts(self.ui, opts)
1103 self._diffopts = patch.diffopts(self.ui, opts)
1104 self.printdiff(repo, qp, files=pats, opts=opts)
1104 self.printdiff(repo, qp, files=pats, opts=opts)
1105
1105
    def refresh(self, repo, pats=None, **opts):
        """Rewrite the topmost applied patch from the working directory.

        Regenerates the patch file (message, user, date and diff) to
        match the current working-directory state, then recommits it.
        Recognized opts include 'msg', 'user', 'date', 'git' and
        'short'.  Returns 1 if no patches are applied.
        """
        if len(self.applied) == 0:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()
        try:
            self.check_toppatch(repo)
            (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
            top = bin(top)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort(_("cannot refresh a revision with children"))
            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            ph = self.readheaders(patchfn)

            patchf = self.opener(patchfn, 'r')

            # if the patch was a git patch, refresh it as a git patch
            for line in patchf:
                if line.startswith('diff --git'):
                    self.diffopts().git = True
                    break

            if msg:
                ph.setmessage(msg)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            patchf.seek(0)
            patchf.truncate()

            comments = str(ph)
            if comments:
                patchf.write(comments)

            if opts.get('git'):
                self.diffopts().git = True
            tip = repo.changelog.tip()
            if top == tip:
                # if the top of our patch queue is also the tip, there is an
                # optimization here.  We update the dirstate in place and strip
                # off the tip commit.  Then just commit the current directory
                # tree.  We can also send repo.commit the list of files
                # changed to speed up the diff
                #
                # in short mode, we only diff the files included in the
                # patch already plus specified files
                #
                # this should really read:
                #   mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
                # but we do it backwards to take advantage of manifest/chlog
                # caching against the next repo.status call
                #
                mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
                changes = repo.changelog.read(tip)
                man = repo.manifest.read(changes[0])
                aaa = aa[:]
                matchfn = cmdutil.match(repo, pats, opts)
                if opts.get('short'):
                    # if amending a patch, we start with existing
                    # files plus specified files - unfiltered
                    match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                    # filter with inc/exl options
                    matchfn = cmdutil.match(repo, opts=opts)
                else:
                    match = cmdutil.matchall(repo)
                m, a, r, d = repo.status(match=match)[:4]

                # we might end up with files that were added between
                # tip and the dirstate parent, but then changed in the
                # local dirstate. in this case, we want them to only
                # show up in the added section
                for x in m:
                    if x not in aa:
                        mm.append(x)
                # we might end up with files added by the local dirstate that
                # were deleted by the patch.  In this case, they should only
                # show up in the changed section.
                for x in a:
                    if x in dd:
                        del dd[dd.index(x)]
                        mm.append(x)
                    else:
                        aa.append(x)
                # make sure any files deleted in the local dirstate
                # are not in the add or change column of the patch
                forget = []
                for x in d + r:
                    if x in aa:
                        del aa[aa.index(x)]
                        forget.append(x)
                        continue
                    elif x in mm:
                        del mm[mm.index(x)]
                        dd.append(x)

                m = list(set(mm))
                r = list(set(dd))
                a = list(set(aa))
                c = [filter(matchfn, l) for l in (m, a, r)]
                match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
                chunks = patch.diff(repo, patchparent, match=match,
                                    changes=c, opts=self.diffopts())
                for chunk in chunks:
                    patchf.write(chunk)

                try:
                    if self.diffopts().git:
                        copies = {}
                        for dst in a:
                            src = repo.dirstate.copied(dst)
                            # during qfold, the source file for copies may
                            # be removed. Treat this as a simple add.
                            if src is not None and src in repo.dirstate:
                                copies.setdefault(src, []).append(dst)
                            repo.dirstate.add(dst)
                        # remember the copies between patchparent and tip
                        for dst in aaa:
                            f = repo.file(dst)
                            src = f.renamed(man[dst])
                            if src:
                                copies.setdefault(src[0], []).extend(copies.get(dst, []))
                                if dst in a:
                                    copies[src[0]].append(dst)
                            # we can't copy a file created by the patch itself
                            if dst in copies:
                                del copies[dst]
                        for src, dsts in copies.iteritems():
                            for dst in dsts:
                                repo.dirstate.copy(src, dst)
                    else:
                        for dst in a:
                            repo.dirstate.add(dst)
                        # Drop useless copy information
                        for f in list(repo.dirstate.copies()):
                            repo.dirstate.copy(None, f)
                    for f in r:
                        repo.dirstate.remove(f)
                    # if the patch excludes a modified file, mark that
                    # file with mtime=0 so status can see it.
                    mm = []
                    for i in xrange(len(m)-1, -1, -1):
                        if not matchfn(m[i]):
                            mm.append(m[i])
                            del m[i]
                    for f in m:
                        repo.dirstate.normal(f)
                    for f in mm:
                        repo.dirstate.normallookup(f)
                    for f in forget:
                        repo.dirstate.forget(f)

                    if not msg:
                        if not ph.message:
                            message = "[mq]: %s\n" % patchfn
                        else:
                            message = "\n".join(ph.message)
                    else:
                        message = msg

                    user = ph.user or changes[1]

                    # assumes strip can roll itself back if interrupted
                    repo.dirstate.setparents(*cparents)
                    self.applied.pop()
                    self.applied_dirty = 1
                    self.strip(repo, top, update=False,
                               backup='strip')
                except:
                    # dirstate was modified in place; throw away the changes
                    repo.dirstate.invalidate()
                    raise

                try:
                    # might be nice to attempt to roll back strip after this
                    patchf.rename()
                    n = repo.commit(match.files(), message, user, ph.date,
                                    match=match, force=1)
                    self.applied.append(statusentry(hex(n), patchfn))
                except:
                    ctx = repo[cparents[0]]
                    repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                    self.save_dirty()
                    self.ui.warn(_('refresh interrupted while patch was popped! '
                                   '(revert --all, qpush to recover)\n'))
                    raise
            else:
                # slow path: regenerate the diff, then pop and re-push the
                # patch so the repository matches the new patch file
                self.printdiff(repo, patchparent, fp=patchf)
                patchf.rename()
                added = repo.status()[1]
                for a in added:
                    f = repo.wjoin(a)
                    try:
                        os.unlink(f)
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(f))
                    except: pass
                    # forget the file copies in the dirstate
                    # push should readd the files later on
                    repo.dirstate.forget(a)
                self.pop(repo, force=True)
                self.push(repo, force=True)
        finally:
            wlock.release()
            self.removeundo(repo)
1322
1322
1323 def init(self, repo, create=False):
1323 def init(self, repo, create=False):
1324 if not create and os.path.isdir(self.path):
1324 if not create and os.path.isdir(self.path):
1325 raise util.Abort(_("patch queue directory already exists"))
1325 raise util.Abort(_("patch queue directory already exists"))
1326 try:
1326 try:
1327 os.mkdir(self.path)
1327 os.mkdir(self.path)
1328 except OSError, inst:
1328 except OSError, inst:
1329 if inst.errno != errno.EEXIST or not create:
1329 if inst.errno != errno.EEXIST or not create:
1330 raise
1330 raise
1331 if create:
1331 if create:
1332 return self.qrepo(create=True)
1332 return self.qrepo(create=True)
1333
1333
1334 def unapplied(self, repo, patch=None):
1334 def unapplied(self, repo, patch=None):
1335 if patch and patch not in self.series:
1335 if patch and patch not in self.series:
1336 raise util.Abort(_("patch %s is not in series file") % patch)
1336 raise util.Abort(_("patch %s is not in series file") % patch)
1337 if not patch:
1337 if not patch:
1338 start = self.series_end()
1338 start = self.series_end()
1339 else:
1339 else:
1340 start = self.series.index(patch) + 1
1340 start = self.series.index(patch) + 1
1341 unapplied = []
1341 unapplied = []
1342 for i in xrange(start, len(self.series)):
1342 for i in xrange(start, len(self.series)):
1343 pushable, reason = self.pushable(i)
1343 pushable, reason = self.pushable(i)
1344 if pushable:
1344 if pushable:
1345 unapplied.append((i, self.series[i]))
1345 unapplied.append((i, self.series[i]))
1346 self.explain_pushable(i)
1346 self.explain_pushable(i)
1347 return unapplied
1347 return unapplied
1348
1348
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print the patch series.

        Without `missing`, prints series entries [start, start+length)
        with an A/U/G status prefix in verbose mode (applied /
        unapplied / guarded); `status` filters to one such state in
        non-verbose mode.  With `missing`, instead lists files in the
        patch directory that are not in the series file.  `summary`
        appends the first line of each patch's message.
        """
        def displayname(patchname):
            # patch name, optionally followed by ': <first message line>'
            if summary:
                ph = self.readheaders(patchname)
                msg = ph.message
                msg = msg and ': ' + msg[0] or ': '
            else:
                msg = ''
            return '%s%s' % (patchname, msg)

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            for i in xrange(start, start+length):
                patch = self.series[i]
                if patch in applied:
                    stat = 'A'
                elif self.pushable(i)[0]:
                    stat = 'U'
                else:
                    stat = 'G'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%d %s ' % (i, stat)
                elif status and status != stat:
                    continue
                self.ui.write('%s%s\n' % (pfx, displayname(patch)))
        else:
            # walk the patch directory for files unknown to the series,
            # skipping mq's own control files and dotfiles
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path,
                                   self.guards_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                self.ui.write("%s%s\n" % (pfx, displayname(x)))
1392
1392
1393 def issaveline(self, l):
1393 def issaveline(self, l):
1394 if l.name == '.hg.patches.save.line':
1394 if l.name == '.hg.patches.save.line':
1395 return True
1395 return True
1396
1396
1397 def qrepo(self, create=False):
1397 def qrepo(self, create=False):
1398 if create or os.path.isdir(self.join(".hg")):
1398 if create or os.path.isdir(self.join(".hg")):
1399 return hg.repository(self.ui, path=self.path, create=create)
1399 return hg.repository(self.ui, path=self.path, create=create)
1400
1400
1401 def restore(self, repo, rev, delete=None, qupdate=None):
1401 def restore(self, repo, rev, delete=None, qupdate=None):
1402 c = repo.changelog.read(rev)
1402 c = repo.changelog.read(rev)
1403 desc = c[4].strip()
1403 desc = c[4].strip()
1404 lines = desc.splitlines()
1404 lines = desc.splitlines()
1405 i = 0
1405 i = 0
1406 datastart = None
1406 datastart = None
1407 series = []
1407 series = []
1408 applied = []
1408 applied = []
1409 qpp = None
1409 qpp = None
1410 for i in xrange(0, len(lines)):
1410 for i in xrange(len(lines)):
1411 if lines[i] == 'Patch Data:':
1411 if lines[i] == 'Patch Data:':
1412 datastart = i + 1
1412 datastart = i + 1
1413 elif lines[i].startswith('Dirstate:'):
1413 elif lines[i].startswith('Dirstate:'):
1414 l = lines[i].rstrip()
1414 l = lines[i].rstrip()
1415 l = l[10:].split(' ')
1415 l = l[10:].split(' ')
1416 qpp = [ bin(x) for x in l ]
1416 qpp = [ bin(x) for x in l ]
1417 elif datastart != None:
1417 elif datastart != None:
1418 l = lines[i].rstrip()
1418 l = lines[i].rstrip()
1419 se = statusentry(l)
1419 se = statusentry(l)
1420 file_ = se.name
1420 file_ = se.name
1421 if se.rev:
1421 if se.rev:
1422 applied.append(se)
1422 applied.append(se)
1423 else:
1423 else:
1424 series.append(file_)
1424 series.append(file_)
1425 if datastart is None:
1425 if datastart is None:
1426 self.ui.warn(_("No saved patch data found\n"))
1426 self.ui.warn(_("No saved patch data found\n"))
1427 return 1
1427 return 1
1428 self.ui.warn(_("restoring status: %s\n") % lines[0])
1428 self.ui.warn(_("restoring status: %s\n") % lines[0])
1429 self.full_series = series
1429 self.full_series = series
1430 self.applied = applied
1430 self.applied = applied
1431 self.parse_series()
1431 self.parse_series()
1432 self.series_dirty = 1
1432 self.series_dirty = 1
1433 self.applied_dirty = 1
1433 self.applied_dirty = 1
1434 heads = repo.changelog.heads()
1434 heads = repo.changelog.heads()
1435 if delete:
1435 if delete:
1436 if rev not in heads:
1436 if rev not in heads:
1437 self.ui.warn(_("save entry has children, leaving it alone\n"))
1437 self.ui.warn(_("save entry has children, leaving it alone\n"))
1438 else:
1438 else:
1439 self.ui.warn(_("removing save entry %s\n") % short(rev))
1439 self.ui.warn(_("removing save entry %s\n") % short(rev))
1440 pp = repo.dirstate.parents()
1440 pp = repo.dirstate.parents()
1441 if rev in pp:
1441 if rev in pp:
1442 update = True
1442 update = True
1443 else:
1443 else:
1444 update = False
1444 update = False
1445 self.strip(repo, rev, update=update, backup='strip')
1445 self.strip(repo, rev, update=update, backup='strip')
1446 if qpp:
1446 if qpp:
1447 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1447 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1448 (short(qpp[0]), short(qpp[1])))
1448 (short(qpp[0]), short(qpp[1])))
1449 if qupdate:
1449 if qupdate:
1450 self.ui.status(_("queue directory updating\n"))
1450 self.ui.status(_("queue directory updating\n"))
1451 r = self.qrepo()
1451 r = self.qrepo()
1452 if not r:
1452 if not r:
1453 self.ui.warn(_("Unable to load queue repository\n"))
1453 self.ui.warn(_("Unable to load queue repository\n"))
1454 return 1
1454 return 1
1455 hg.clean(r, qpp[0])
1455 hg.clean(r, qpp[0])
1456
1456
1457 def save(self, repo, msg=None):
1457 def save(self, repo, msg=None):
1458 if len(self.applied) == 0:
1458 if len(self.applied) == 0:
1459 self.ui.warn(_("save: no patches applied, exiting\n"))
1459 self.ui.warn(_("save: no patches applied, exiting\n"))
1460 return 1
1460 return 1
1461 if self.issaveline(self.applied[-1]):
1461 if self.issaveline(self.applied[-1]):
1462 self.ui.warn(_("status is already saved\n"))
1462 self.ui.warn(_("status is already saved\n"))
1463 return 1
1463 return 1
1464
1464
1465 ar = [ ':' + x for x in self.full_series ]
1465 ar = [ ':' + x for x in self.full_series ]
1466 if not msg:
1466 if not msg:
1467 msg = _("hg patches saved state")
1467 msg = _("hg patches saved state")
1468 else:
1468 else:
1469 msg = "hg patches: " + msg.rstrip('\r\n')
1469 msg = "hg patches: " + msg.rstrip('\r\n')
1470 r = self.qrepo()
1470 r = self.qrepo()
1471 if r:
1471 if r:
1472 pp = r.dirstate.parents()
1472 pp = r.dirstate.parents()
1473 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1473 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1474 msg += "\n\nPatch Data:\n"
1474 msg += "\n\nPatch Data:\n"
1475 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1475 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1476 "\n".join(ar) + '\n' or "")
1476 "\n".join(ar) + '\n' or "")
1477 n = repo.commit(None, text, user=None, force=1)
1477 n = repo.commit(None, text, user=None, force=1)
1478 if not n:
1478 if not n:
1479 self.ui.warn(_("repo commit failed\n"))
1479 self.ui.warn(_("repo commit failed\n"))
1480 return 1
1480 return 1
1481 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1481 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1482 self.applied_dirty = 1
1482 self.applied_dirty = 1
1483 self.removeundo(repo)
1483 self.removeundo(repo)
1484
1484
1485 def full_series_end(self):
1485 def full_series_end(self):
1486 if len(self.applied) > 0:
1486 if len(self.applied) > 0:
1487 p = self.applied[-1].name
1487 p = self.applied[-1].name
1488 end = self.find_series(p)
1488 end = self.find_series(p)
1489 if end is None:
1489 if end is None:
1490 return len(self.full_series)
1490 return len(self.full_series)
1491 return end + 1
1491 return end + 1
1492 return 0
1492 return 0
1493
1493
def series_end(self, all_patches=False):
    """If all_patches is False, return the index of the next pushable patch
    in the series, or the series length. If all_patches is True, return the
    index of the first patch past the last applied one.
    """
    def advance(idx):
        # With all_patches we take the position as-is; otherwise skip
        # over guarded (non-pushable) patches, explaining each skip.
        if all_patches:
            return idx
        while idx < len(self.series):
            pushable, reason = self.pushable(idx)
            if pushable:
                break
            self.explain_pushable(idx)
            idx += 1
        return idx

    if self.applied:
        top = self.applied[-1].name
        try:
            pos = self.series.index(top)
        except ValueError:
            return 0
        return advance(pos + 1)
    return advance(0)
1519
1519
def appliedname(self, index):
    """Return the display name of the applied patch at *index*.

    In verbose mode the name is prefixed with its position in the
    series file.
    """
    pname = self.applied[index].name
    if self.ui.verbose:
        return "%s %s" % (self.series.index(pname), pname)
    return pname
1527
1527
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    """Import patches into the queue.

    Two modes, which may not be combined:
    - *rev*: convert existing changesets into mq patches (hg qimport -r);
    - *files*: import patch files (paths, URLs, or '-' for stdin).

    patchname: explicit name for a single imported patch.
    existing:  register an already-present file in the patch directory.
    force:     allow overwriting an existing patch file / series entry.
    git:       emit git-style diffs when exporting revisions.

    Raises util.Abort on any invalid combination or unreadable input.
    Marks applied/series state dirty; caller is expected to save_dirty().
    """
    def checkseries(patchname):
        # Reject duplicate series entries unless the caller forced it.
        if patchname in self.series:
            raise util.Abort(_('patch %s is already in the series file')
                             % patchname)
    def checkfile(patchname):
        # Refuse to clobber an on-disk patch file without --force.
        if not force and os.path.exists(self.join(patchname)):
            raise util.Abort(_('patch "%s" already exists')
                             % patchname)

    if rev:
        if files:
            raise util.Abort(_('option "-r" not valid when importing '
                               'files'))
        rev = cmdutil.revrange(repo, rev)
        # Sort revisions descending; they are pushed onto the queue
        # front-first below, so the oldest ends up deepest.
        rev.sort(lambda x, y: cmp(y, x))
    if (len(files) > 1 or len(rev) > 1) and patchname:
        raise util.Abort(_('option "-n" not valid when importing multiple '
                           'patches'))
    i = 0
    added = []
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
        if len(heads) > 1:
            raise util.Abort(_('revision %d is the root of more than one '
                               'branch') % rev[-1])
        if self.applied:
            base = hex(repo.changelog.node(rev[0]))
            if base in [n.rev for n in self.applied]:
                raise util.Abort(_('revision %d is already managed')
                                 % rev[0])
            if heads != [bin(self.applied[-1].rev)]:
                raise util.Abort(_('revision %d is not the parent of '
                                   'the queue') % rev[0])
            base = repo.changelog.rev(bin(self.applied[0].rev))
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev[0])]:
                raise util.Abort(_('revision %d has unmanaged children')
                                 % rev[0])
            lastparent = None

        if git:
            self.diffopts().git = True

        for r in rev:
            p1, p2 = repo.changelog.parentrevs(r)
            n = repo.changelog.node(r)
            if p2 != nullrev:
                # Merge changesets cannot be represented as a single patch.
                raise util.Abort(_('cannot import merge revision %d') % r)
            if lastparent and lastparent != r:
                raise util.Abort(_('revision %d is not the parent of %d')
                                 % (r, lastparent))
            lastparent = p1

            if not patchname:
                patchname = normname('%d.diff' % r)
            self.check_reserved_name(patchname)
            checkseries(patchname)
            checkfile(patchname)
            # Converted revisions go to the FRONT of the series/queue.
            self.full_series.insert(0, patchname)

            patchf = self.opener(patchname, "w")
            patch.export(repo, [n], fp=patchf, opts=self.diffopts())
            patchf.close()

            se = statusentry(hex(n), patchname)
            self.applied.insert(0, se)

            added.append(patchname)
            patchname = None
        self.parse_series()
        self.applied_dirty = 1

    for filename in files:
        if existing:
            if filename == '-':
                raise util.Abort(_('-e is incompatible with import from -'))
            if not patchname:
                patchname = normname(filename)
            self.check_reserved_name(patchname)
            if not os.path.isfile(self.join(patchname)):
                raise util.Abort(_("patch %s does not exist") % patchname)
        else:
            try:
                if filename == '-':
                    if not patchname:
                        raise util.Abort(_('need --name to import a patch from -'))
                    text = sys.stdin.read()
                else:
                    # url.open handles both local paths and remote URLs.
                    text = url.open(self.ui, filename).read()
            except (OSError, IOError):
                raise util.Abort(_("unable to read %s") % filename)
            if not patchname:
                patchname = normname(os.path.basename(filename))
            self.check_reserved_name(patchname)
            checkfile(patchname)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
        if not force:
            checkseries(patchname)
        if patchname not in self.series:
            # Insert after the last applied patch (plus patches already
            # added in this same call, tracked by i).
            index = self.full_series_end() + i
            self.full_series[index:index] = [patchname]
        self.parse_series()
        self.ui.warn(_("adding %s to series file\n") % patchname)
        i += 1
        added.append(patchname)
        patchname = None
    self.series_dirty = 1
    qrepo = self.qrepo()
    if qrepo:
        # Track new patch files in the versioned patch repo, if any.
        qrepo.add(added)
1645
1645
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, unless they are arguments to the
    -r/--rev parameter. At least one patch or revision is required.

    With --rev, mq will stop managing the named revisions (converting
    them to regular mercurial changesets). The qfinish command should
    be used as an alternative for qdelete -r, as the latter option is
    deprecated.

    With -k/--keep, the patch files are preserved in the patch
    directory."""
    # Thin wrapper: delegate to the queue object, then persist its state.
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1663
1663
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if not patch:
        length = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        # Show everything up to and including the named patch.
        length = q.series.index(patch) + 1
    return q.qseries(repo, length=length, status='A',
                     summary=opts.get('summary'))
1674
1674
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if not patch:
        first = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        # Start listing just past the named patch.
        first = q.series.index(patch) + 1
    q.qseries(repo, start=first, status='U', summary=opts.get('summary'))
1685
1685
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.
    """
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'],
               existing=opts['existing'], force=opts['force'],
               rev=opts['rev'], git=opts['git'])
    mq.save_dirty()

    # Auto-push only applies to imported patch files; --rev imports
    # are already applied by construction.
    if opts.get('push') and not opts.get('rev'):
        return mq.push(repo, None)
    return 0
1722
1722
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository."""
    q = repo.mq
    # q.init returns the nested patch repository only when -c was given.
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if r:
        # Seed a default .hgignore so mq's own status/guards bookkeeping
        # files are not tracked by the patch repository.
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wopener('.hgignore', 'w')
            fp.write('^\\.hg\n')
            fp.write('^\\.mq\n')
            fp.write('syntax: glob\n')
            fp.write('status\n')
            fp.write('guards\n')
            fp.close()
        # Ensure an (empty) series file exists so it can be versioned.
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r.add(['.hgignore', 'series'])
        # Offer to add any other pre-existing patch files interactively.
        commands.add(ui, r)
    return 0
1748
1748
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested mercurial repository, as
    would be created by qinit -c.
    '''
    def patchdir(repo):
        # Default location of the versioned patch repo for *repo*.
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    # Fail early if there is no versioned patch repository to clone.
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # qbase = first applied patch's changeset in the source.
            qbase = bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # Remote dest: restrict the clone to heads that do not
                # descend from qbase, plus qbase's parent, so applied
                # patches never reach the destination.
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        # Remote source: best effort — ask it for the qbase bookmark/tag.
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # Remove patch changesets that slipped into the destination
            # (possible when destrev filtering could not be applied).
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
1812
1812
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    r = repo.mq.qrepo()
    if not r:
        raise util.Abort('no queue repository')
    # Run the standard commit command inside the nested patch repo.
    commands.commit(r.ui, r, *pats, **opts)
1819
1819
def series(ui, repo, **opts):
    """print the entire series file"""
    mq = repo.mq
    mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1824
1824
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    if q.applied:
        t = q.series_end(True)
    else:
        t = 0
    if not t:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=t - 1, length=1, status='A',
                     summary=opts.get('summary'))
1835
1835
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1844
1844
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    count = len(q.applied)
    if count == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    if not count:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=count - 2, length=1, status='A',
                     summary=opts.get('summary'))
1857
1857
def setupheaderopts(ui, opts):
    """Fill opts['user']/opts['date'] from --currentuser/--currentdate.

    The username and date are computed eagerly, preserving the
    original's evaluation order (ui.username() may abort when no
    username is configured, regardless of which flags were passed).
    """
    username = ui.username()
    datestr = "%d %d" % util.makedate()
    if not opts['user'] and opts['currentuser']:
        opts['user'] = username
    if not opts['date'] and opts['currentdate']:
        opts['date'] = datestr
1864
1864
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). It will refuse to run if there are any outstanding changes
    unless -f/--force is specified, in which case the patch will be
    initialized with them. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.
    """
    msg = cmdutil.logmessage(opts)
    def getmsg():
        # Deferred: the editor is only launched when mq evaluates the
        # message callable at commit time.
        return ui.edit(msg, ui.username())
    q = repo.mq
    # Fixed: the original assigned opts['msg'] = msg unconditionally and
    # then immediately overwrote it in the if/else below (dead store).
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1901
1901
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.
    """
    q = repo.mq
    msg = cmdutil.logmessage(opts)
    if opts['edit']:
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if msg:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # Pre-fill the editor with the top patch's existing header.
        toppatch = q.applied[-1].name
        ph = q.readheaders(toppatch)
        msg = ui.edit('\n'.join(ph.message), ph.user or ui.username())
    setupheaderopts(ui, opts)
    result = q.refresh(repo, pats, msg=msg, **opts)
    q.save_dirty()
    return result
1932
1932
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use 'hg diff' if you only want to see the changes made since the
    last qrefresh, or 'hg export qtip' if you want to see changes made
    by the current patch without including changes made since the
    qrefresh.
    """
    mq = repo.mq
    mq.diff(repo, pats, opts)
    return 0
1948
1948
1949 def fold(ui, repo, *files, **opts):
1949 def fold(ui, repo, *files, **opts):
1950 """fold the named patches into the current patch
1950 """fold the named patches into the current patch
1951
1951
1952 Patches must not yet be applied. Each patch will be successively
1952 Patches must not yet be applied. Each patch will be successively
1953 applied to the current patch in the order given. If all the
1953 applied to the current patch in the order given. If all the
1954 patches apply successfully, the current patch will be refreshed
1954 patches apply successfully, the current patch will be refreshed
1955 with the new cumulative patch, and the folded patches will be
1955 with the new cumulative patch, and the folded patches will be
1956 deleted. With -k/--keep, the folded patch files will not be
1956 deleted. With -k/--keep, the folded patch files will not be
1957 removed afterwards.
1957 removed afterwards.
1958
1958
1959 The header for each folded patch will be concatenated with the
1959 The header for each folded patch will be concatenated with the
1960 current patch header, separated by a line of '* * *'."""
1960 current patch header, separated by a line of '* * *'."""
1961
1961
1962 q = repo.mq
1962 q = repo.mq
1963
1963
1964 if not files:
1964 if not files:
1965 raise util.Abort(_('qfold requires at least one patch name'))
1965 raise util.Abort(_('qfold requires at least one patch name'))
1966 if not q.check_toppatch(repo):
1966 if not q.check_toppatch(repo):
1967 raise util.Abort(_('No patches applied'))
1967 raise util.Abort(_('No patches applied'))
1968 q.check_localchanges(repo)
1968 q.check_localchanges(repo)
1969
1969
1970 message = cmdutil.logmessage(opts)
1970 message = cmdutil.logmessage(opts)
1971 if opts['edit']:
1971 if opts['edit']:
1972 if message:
1972 if message:
1973 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1973 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1974
1974
1975 parent = q.lookup('qtip')
1975 parent = q.lookup('qtip')
1976 patches = []
1976 patches = []
1977 messages = []
1977 messages = []
1978 for f in files:
1978 for f in files:
1979 p = q.lookup(f)
1979 p = q.lookup(f)
1980 if p in patches or p == parent:
1980 if p in patches or p == parent:
1981 ui.warn(_('Skipping already folded patch %s') % p)
1981 ui.warn(_('Skipping already folded patch %s') % p)
1982 if q.isapplied(p):
1982 if q.isapplied(p):
1983 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
1983 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
1984 patches.append(p)
1984 patches.append(p)
1985
1985
1986 for p in patches:
1986 for p in patches:
1987 if not message:
1987 if not message:
1988 ph = q.readheaders(p)
1988 ph = q.readheaders(p)
1989 if ph.message:
1989 if ph.message:
1990 messages.append(ph.message)
1990 messages.append(ph.message)
1991 pf = q.join(p)
1991 pf = q.join(p)
1992 (patchsuccess, files, fuzz) = q.patch(repo, pf)
1992 (patchsuccess, files, fuzz) = q.patch(repo, pf)
1993 if not patchsuccess:
1993 if not patchsuccess:
1994 raise util.Abort(_('Error folding patch %s') % p)
1994 raise util.Abort(_('Error folding patch %s') % p)
1995 patch.updatedir(ui, repo, files)
1995 patch.updatedir(ui, repo, files)
1996
1996
1997 if not message:
1997 if not message:
1998 ph = q.readheaders(parent)
1998 ph = q.readheaders(parent)
1999 message, user = ph.message, ph.user
1999 message, user = ph.message, ph.user
2000 for msg in messages:
2000 for msg in messages:
2001 message.append('* * *')
2001 message.append('* * *')
2002 message.extend(msg)
2002 message.extend(msg)
2003 message = '\n'.join(message)
2003 message = '\n'.join(message)
2004
2004
2005 if opts['edit']:
2005 if opts['edit']:
2006 message = ui.edit(message, user or ui.username())
2006 message = ui.edit(message, user or ui.username())
2007
2007
2008 q.refresh(repo, msg=message)
2008 q.refresh(repo, msg=message)
2009 q.delete(repo, patches, opts)
2009 q.delete(repo, patches, opts)
2010 q.save_dirty()
2010 q.save_dirty()
2011
2011
2012 def goto(ui, repo, patch, **opts):
2012 def goto(ui, repo, patch, **opts):
2013 '''push or pop patches until named patch is at top of stack'''
2013 '''push or pop patches until named patch is at top of stack'''
2014 q = repo.mq
2014 q = repo.mq
2015 patch = q.lookup(patch)
2015 patch = q.lookup(patch)
2016 if q.isapplied(patch):
2016 if q.isapplied(patch):
2017 ret = q.pop(repo, patch, force=opts['force'])
2017 ret = q.pop(repo, patch, force=opts['force'])
2018 else:
2018 else:
2019 ret = q.push(repo, patch, force=opts['force'])
2019 ret = q.push(repo, patch, force=opts['force'])
2020 q.save_dirty()
2020 q.save_dirty()
2021 return ret
2021 return ret
2022
2022
2023 def guard(ui, repo, *args, **opts):
2023 def guard(ui, repo, *args, **opts):
2024 '''set or print guards for a patch
2024 '''set or print guards for a patch
2025
2025
2026 Guards control whether a patch can be pushed. A patch with no
2026 Guards control whether a patch can be pushed. A patch with no
2027 guards is always pushed. A patch with a positive guard ("+foo") is
2027 guards is always pushed. A patch with a positive guard ("+foo") is
2028 pushed only if the qselect command has activated it. A patch with
2028 pushed only if the qselect command has activated it. A patch with
2029 a negative guard ("-foo") is never pushed if the qselect command
2029 a negative guard ("-foo") is never pushed if the qselect command
2030 has activated it.
2030 has activated it.
2031
2031
2032 With no arguments, print the currently active guards.
2032 With no arguments, print the currently active guards.
2033 With arguments, set guards for the named patch.
2033 With arguments, set guards for the named patch.
2034 NOTE: Specifying negative guards now requires '--'.
2034 NOTE: Specifying negative guards now requires '--'.
2035
2035
2036 To set guards on another patch:
2036 To set guards on another patch:
2037 hg qguard -- other.patch +2.6.17 -stable
2037 hg qguard -- other.patch +2.6.17 -stable
2038 '''
2038 '''
2039 def status(idx):
2039 def status(idx):
2040 guards = q.series_guards[idx] or ['unguarded']
2040 guards = q.series_guards[idx] or ['unguarded']
2041 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
2041 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
2042 q = repo.mq
2042 q = repo.mq
2043 patch = None
2043 patch = None
2044 args = list(args)
2044 args = list(args)
2045 if opts['list']:
2045 if opts['list']:
2046 if args or opts['none']:
2046 if args or opts['none']:
2047 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2047 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2048 for i in xrange(len(q.series)):
2048 for i in xrange(len(q.series)):
2049 status(i)
2049 status(i)
2050 return
2050 return
2051 if not args or args[0][0:1] in '-+':
2051 if not args or args[0][0:1] in '-+':
2052 if not q.applied:
2052 if not q.applied:
2053 raise util.Abort(_('no patches applied'))
2053 raise util.Abort(_('no patches applied'))
2054 patch = q.applied[-1].name
2054 patch = q.applied[-1].name
2055 if patch is None and args[0][0:1] not in '-+':
2055 if patch is None and args[0][0:1] not in '-+':
2056 patch = args.pop(0)
2056 patch = args.pop(0)
2057 if patch is None:
2057 if patch is None:
2058 raise util.Abort(_('no patch to work with'))
2058 raise util.Abort(_('no patch to work with'))
2059 if args or opts['none']:
2059 if args or opts['none']:
2060 idx = q.find_series(patch)
2060 idx = q.find_series(patch)
2061 if idx is None:
2061 if idx is None:
2062 raise util.Abort(_('no patch named %s') % patch)
2062 raise util.Abort(_('no patch named %s') % patch)
2063 q.set_guards(idx, args)
2063 q.set_guards(idx, args)
2064 q.save_dirty()
2064 q.save_dirty()
2065 else:
2065 else:
2066 status(q.series.index(q.lookup(patch)))
2066 status(q.series.index(q.lookup(patch)))
2067
2067
2068 def header(ui, repo, patch=None):
2068 def header(ui, repo, patch=None):
2069 """print the header of the topmost or specified patch"""
2069 """print the header of the topmost or specified patch"""
2070 q = repo.mq
2070 q = repo.mq
2071
2071
2072 if patch:
2072 if patch:
2073 patch = q.lookup(patch)
2073 patch = q.lookup(patch)
2074 else:
2074 else:
2075 if not q.applied:
2075 if not q.applied:
2076 ui.write('no patches applied\n')
2076 ui.write('no patches applied\n')
2077 return 1
2077 return 1
2078 patch = q.lookup('qtip')
2078 patch = q.lookup('qtip')
2079 ph = repo.mq.readheaders(patch)
2079 ph = repo.mq.readheaders(patch)
2080
2080
2081 ui.write('\n'.join(ph.message) + '\n')
2081 ui.write('\n'.join(ph.message) + '\n')
2082
2082
2083 def lastsavename(path):
2083 def lastsavename(path):
2084 (directory, base) = os.path.split(path)
2084 (directory, base) = os.path.split(path)
2085 names = os.listdir(directory)
2085 names = os.listdir(directory)
2086 namere = re.compile("%s.([0-9]+)" % base)
2086 namere = re.compile("%s.([0-9]+)" % base)
2087 maxindex = None
2087 maxindex = None
2088 maxname = None
2088 maxname = None
2089 for f in names:
2089 for f in names:
2090 m = namere.match(f)
2090 m = namere.match(f)
2091 if m:
2091 if m:
2092 index = int(m.group(1))
2092 index = int(m.group(1))
2093 if maxindex is None or index > maxindex:
2093 if maxindex is None or index > maxindex:
2094 maxindex = index
2094 maxindex = index
2095 maxname = f
2095 maxname = f
2096 if maxname:
2096 if maxname:
2097 return (os.path.join(directory, maxname), maxindex)
2097 return (os.path.join(directory, maxname), maxindex)
2098 return (None, None)
2098 return (None, None)
2099
2099
2100 def savename(path):
2100 def savename(path):
2101 (last, index) = lastsavename(path)
2101 (last, index) = lastsavename(path)
2102 if last is None:
2102 if last is None:
2103 index = 0
2103 index = 0
2104 newpath = path + ".%d" % (index + 1)
2104 newpath = path + ".%d" % (index + 1)
2105 return newpath
2105 return newpath
2106
2106
2107 def push(ui, repo, patch=None, **opts):
2107 def push(ui, repo, patch=None, **opts):
2108 """push the next patch onto the stack
2108 """push the next patch onto the stack
2109
2109
2110 When -f/--force is applied, all local changes in patched files
2110 When -f/--force is applied, all local changes in patched files
2111 will be lost.
2111 will be lost.
2112 """
2112 """
2113 q = repo.mq
2113 q = repo.mq
2114 mergeq = None
2114 mergeq = None
2115
2115
2116 if opts['merge']:
2116 if opts['merge']:
2117 if opts['name']:
2117 if opts['name']:
2118 newpath = repo.join(opts['name'])
2118 newpath = repo.join(opts['name'])
2119 else:
2119 else:
2120 newpath, i = lastsavename(q.path)
2120 newpath, i = lastsavename(q.path)
2121 if not newpath:
2121 if not newpath:
2122 ui.warn(_("no saved queues found, please use -n\n"))
2122 ui.warn(_("no saved queues found, please use -n\n"))
2123 return 1
2123 return 1
2124 mergeq = queue(ui, repo.join(""), newpath)
2124 mergeq = queue(ui, repo.join(""), newpath)
2125 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2125 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2126 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2126 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2127 mergeq=mergeq, all=opts.get('all'))
2127 mergeq=mergeq, all=opts.get('all'))
2128 return ret
2128 return ret
2129
2129
2130 def pop(ui, repo, patch=None, **opts):
2130 def pop(ui, repo, patch=None, **opts):
2131 """pop the current patch off the stack
2131 """pop the current patch off the stack
2132
2132
2133 By default, pops off the top of the patch stack. If given a patch
2133 By default, pops off the top of the patch stack. If given a patch
2134 name, keeps popping off patches until the named patch is at the
2134 name, keeps popping off patches until the named patch is at the
2135 top of the stack.
2135 top of the stack.
2136 """
2136 """
2137 localupdate = True
2137 localupdate = True
2138 if opts['name']:
2138 if opts['name']:
2139 q = queue(ui, repo.join(""), repo.join(opts['name']))
2139 q = queue(ui, repo.join(""), repo.join(opts['name']))
2140 ui.warn(_('using patch queue: %s\n') % q.path)
2140 ui.warn(_('using patch queue: %s\n') % q.path)
2141 localupdate = False
2141 localupdate = False
2142 else:
2142 else:
2143 q = repo.mq
2143 q = repo.mq
2144 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2144 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2145 all=opts['all'])
2145 all=opts['all'])
2146 q.save_dirty()
2146 q.save_dirty()
2147 return ret
2147 return ret
2148
2148
2149 def rename(ui, repo, patch, name=None, **opts):
2149 def rename(ui, repo, patch, name=None, **opts):
2150 """rename a patch
2150 """rename a patch
2151
2151
2152 With one argument, renames the current patch to PATCH1.
2152 With one argument, renames the current patch to PATCH1.
2153 With two arguments, renames PATCH1 to PATCH2."""
2153 With two arguments, renames PATCH1 to PATCH2."""
2154
2154
2155 q = repo.mq
2155 q = repo.mq
2156
2156
2157 if not name:
2157 if not name:
2158 name = patch
2158 name = patch
2159 patch = None
2159 patch = None
2160
2160
2161 if patch:
2161 if patch:
2162 patch = q.lookup(patch)
2162 patch = q.lookup(patch)
2163 else:
2163 else:
2164 if not q.applied:
2164 if not q.applied:
2165 ui.write(_('no patches applied\n'))
2165 ui.write(_('no patches applied\n'))
2166 return
2166 return
2167 patch = q.lookup('qtip')
2167 patch = q.lookup('qtip')
2168 absdest = q.join(name)
2168 absdest = q.join(name)
2169 if os.path.isdir(absdest):
2169 if os.path.isdir(absdest):
2170 name = normname(os.path.join(name, os.path.basename(patch)))
2170 name = normname(os.path.join(name, os.path.basename(patch)))
2171 absdest = q.join(name)
2171 absdest = q.join(name)
2172 if os.path.exists(absdest):
2172 if os.path.exists(absdest):
2173 raise util.Abort(_('%s already exists') % absdest)
2173 raise util.Abort(_('%s already exists') % absdest)
2174
2174
2175 if name in q.series:
2175 if name in q.series:
2176 raise util.Abort(_('A patch named %s already exists in the series file') % name)
2176 raise util.Abort(_('A patch named %s already exists in the series file') % name)
2177
2177
2178 if ui.verbose:
2178 if ui.verbose:
2179 ui.write('renaming %s to %s\n' % (patch, name))
2179 ui.write('renaming %s to %s\n' % (patch, name))
2180 i = q.find_series(patch)
2180 i = q.find_series(patch)
2181 guards = q.guard_re.findall(q.full_series[i])
2181 guards = q.guard_re.findall(q.full_series[i])
2182 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2182 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2183 q.parse_series()
2183 q.parse_series()
2184 q.series_dirty = 1
2184 q.series_dirty = 1
2185
2185
2186 info = q.isapplied(patch)
2186 info = q.isapplied(patch)
2187 if info:
2187 if info:
2188 q.applied[info[0]] = statusentry(info[1], name)
2188 q.applied[info[0]] = statusentry(info[1], name)
2189 q.applied_dirty = 1
2189 q.applied_dirty = 1
2190
2190
2191 util.rename(q.join(patch), absdest)
2191 util.rename(q.join(patch), absdest)
2192 r = q.qrepo()
2192 r = q.qrepo()
2193 if r:
2193 if r:
2194 wlock = r.wlock()
2194 wlock = r.wlock()
2195 try:
2195 try:
2196 if r.dirstate[patch] == 'a':
2196 if r.dirstate[patch] == 'a':
2197 r.dirstate.forget(patch)
2197 r.dirstate.forget(patch)
2198 r.dirstate.add(name)
2198 r.dirstate.add(name)
2199 else:
2199 else:
2200 if r.dirstate[name] == 'r':
2200 if r.dirstate[name] == 'r':
2201 r.undelete([name])
2201 r.undelete([name])
2202 r.copy(patch, name)
2202 r.copy(patch, name)
2203 r.remove([patch], False)
2203 r.remove([patch], False)
2204 finally:
2204 finally:
2205 wlock.release()
2205 wlock.release()
2206
2206
2207 q.save_dirty()
2207 q.save_dirty()
2208
2208
2209 def restore(ui, repo, rev, **opts):
2209 def restore(ui, repo, rev, **opts):
2210 """restore the queue state saved by a revision"""
2210 """restore the queue state saved by a revision"""
2211 rev = repo.lookup(rev)
2211 rev = repo.lookup(rev)
2212 q = repo.mq
2212 q = repo.mq
2213 q.restore(repo, rev, delete=opts['delete'],
2213 q.restore(repo, rev, delete=opts['delete'],
2214 qupdate=opts['update'])
2214 qupdate=opts['update'])
2215 q.save_dirty()
2215 q.save_dirty()
2216 return 0
2216 return 0
2217
2217
2218 def save(ui, repo, **opts):
2218 def save(ui, repo, **opts):
2219 """save current queue state"""
2219 """save current queue state"""
2220 q = repo.mq
2220 q = repo.mq
2221 message = cmdutil.logmessage(opts)
2221 message = cmdutil.logmessage(opts)
2222 ret = q.save(repo, msg=message)
2222 ret = q.save(repo, msg=message)
2223 if ret:
2223 if ret:
2224 return ret
2224 return ret
2225 q.save_dirty()
2225 q.save_dirty()
2226 if opts['copy']:
2226 if opts['copy']:
2227 path = q.path
2227 path = q.path
2228 if opts['name']:
2228 if opts['name']:
2229 newpath = os.path.join(q.basepath, opts['name'])
2229 newpath = os.path.join(q.basepath, opts['name'])
2230 if os.path.exists(newpath):
2230 if os.path.exists(newpath):
2231 if not os.path.isdir(newpath):
2231 if not os.path.isdir(newpath):
2232 raise util.Abort(_('destination %s exists and is not '
2232 raise util.Abort(_('destination %s exists and is not '
2233 'a directory') % newpath)
2233 'a directory') % newpath)
2234 if not opts['force']:
2234 if not opts['force']:
2235 raise util.Abort(_('destination %s exists, '
2235 raise util.Abort(_('destination %s exists, '
2236 'use -f to force') % newpath)
2236 'use -f to force') % newpath)
2237 else:
2237 else:
2238 newpath = savename(path)
2238 newpath = savename(path)
2239 ui.warn(_("copy %s to %s\n") % (path, newpath))
2239 ui.warn(_("copy %s to %s\n") % (path, newpath))
2240 util.copyfiles(path, newpath)
2240 util.copyfiles(path, newpath)
2241 if opts['empty']:
2241 if opts['empty']:
2242 try:
2242 try:
2243 os.unlink(q.join(q.status_path))
2243 os.unlink(q.join(q.status_path))
2244 except:
2244 except:
2245 pass
2245 pass
2246 return 0
2246 return 0
2247
2247
2248 def strip(ui, repo, rev, **opts):
2248 def strip(ui, repo, rev, **opts):
2249 """strip a revision and all its descendants from the repository
2249 """strip a revision and all its descendants from the repository
2250
2250
2251 If one of the working directory's parent revisions is stripped, the
2251 If one of the working directory's parent revisions is stripped, the
2252 working directory will be updated to the parent of the stripped
2252 working directory will be updated to the parent of the stripped
2253 revision.
2253 revision.
2254 """
2254 """
2255 backup = 'all'
2255 backup = 'all'
2256 if opts['backup']:
2256 if opts['backup']:
2257 backup = 'strip'
2257 backup = 'strip'
2258 elif opts['nobackup']:
2258 elif opts['nobackup']:
2259 backup = 'none'
2259 backup = 'none'
2260
2260
2261 rev = repo.lookup(rev)
2261 rev = repo.lookup(rev)
2262 p = repo.dirstate.parents()
2262 p = repo.dirstate.parents()
2263 cl = repo.changelog
2263 cl = repo.changelog
2264 update = True
2264 update = True
2265 if p[0] == nullid:
2265 if p[0] == nullid:
2266 update = False
2266 update = False
2267 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2267 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2268 update = False
2268 update = False
2269 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2269 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2270 update = False
2270 update = False
2271
2271
2272 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2272 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2273 return 0
2273 return 0
2274
2274
2275 def select(ui, repo, *args, **opts):
2275 def select(ui, repo, *args, **opts):
2276 '''set or print guarded patches to push
2276 '''set or print guarded patches to push
2277
2277
2278 Use the qguard command to set or print guards on patch, then use
2278 Use the qguard command to set or print guards on patch, then use
2279 qselect to tell mq which guards to use. A patch will be pushed if
2279 qselect to tell mq which guards to use. A patch will be pushed if
2280 it has no guards or any positive guards match the currently
2280 it has no guards or any positive guards match the currently
2281 selected guard, but will not be pushed if any negative guards
2281 selected guard, but will not be pushed if any negative guards
2282 match the current guard. For example:
2282 match the current guard. For example:
2283
2283
2284 qguard foo.patch -stable (negative guard)
2284 qguard foo.patch -stable (negative guard)
2285 qguard bar.patch +stable (positive guard)
2285 qguard bar.patch +stable (positive guard)
2286 qselect stable
2286 qselect stable
2287
2287
2288 This activates the "stable" guard. mq will skip foo.patch (because
2288 This activates the "stable" guard. mq will skip foo.patch (because
2289 it has a negative match) but push bar.patch (because it has a
2289 it has a negative match) but push bar.patch (because it has a
2290 positive match).
2290 positive match).
2291
2291
2292 With no arguments, prints the currently active guards.
2292 With no arguments, prints the currently active guards.
2293 With one argument, sets the active guard.
2293 With one argument, sets the active guard.
2294
2294
2295 Use -n/--none to deactivate guards (no other arguments needed).
2295 Use -n/--none to deactivate guards (no other arguments needed).
2296 When no guards are active, patches with positive guards are
2296 When no guards are active, patches with positive guards are
2297 skipped and patches with negative guards are pushed.
2297 skipped and patches with negative guards are pushed.
2298
2298
2299 qselect can change the guards on applied patches. It does not pop
2299 qselect can change the guards on applied patches. It does not pop
2300 guarded patches by default. Use --pop to pop back to the last
2300 guarded patches by default. Use --pop to pop back to the last
2301 applied patch that is not guarded. Use --reapply (which implies
2301 applied patch that is not guarded. Use --reapply (which implies
2302 --pop) to push back to the current patch afterwards, but skip
2302 --pop) to push back to the current patch afterwards, but skip
2303 guarded patches.
2303 guarded patches.
2304
2304
2305 Use -s/--series to print a list of all guards in the series file
2305 Use -s/--series to print a list of all guards in the series file
2306 (no other arguments needed). Use -v for more information.'''
2306 (no other arguments needed). Use -v for more information.'''
2307
2307
2308 q = repo.mq
2308 q = repo.mq
2309 guards = q.active()
2309 guards = q.active()
2310 if args or opts['none']:
2310 if args or opts['none']:
2311 old_unapplied = q.unapplied(repo)
2311 old_unapplied = q.unapplied(repo)
2312 old_guarded = [i for i in xrange(len(q.applied)) if
2312 old_guarded = [i for i in xrange(len(q.applied)) if
2313 not q.pushable(i)[0]]
2313 not q.pushable(i)[0]]
2314 q.set_active(args)
2314 q.set_active(args)
2315 q.save_dirty()
2315 q.save_dirty()
2316 if not args:
2316 if not args:
2317 ui.status(_('guards deactivated\n'))
2317 ui.status(_('guards deactivated\n'))
2318 if not opts['pop'] and not opts['reapply']:
2318 if not opts['pop'] and not opts['reapply']:
2319 unapplied = q.unapplied(repo)
2319 unapplied = q.unapplied(repo)
2320 guarded = [i for i in xrange(len(q.applied))
2320 guarded = [i for i in xrange(len(q.applied))
2321 if not q.pushable(i)[0]]
2321 if not q.pushable(i)[0]]
2322 if len(unapplied) != len(old_unapplied):
2322 if len(unapplied) != len(old_unapplied):
2323 ui.status(_('number of unguarded, unapplied patches has '
2323 ui.status(_('number of unguarded, unapplied patches has '
2324 'changed from %d to %d\n') %
2324 'changed from %d to %d\n') %
2325 (len(old_unapplied), len(unapplied)))
2325 (len(old_unapplied), len(unapplied)))
2326 if len(guarded) != len(old_guarded):
2326 if len(guarded) != len(old_guarded):
2327 ui.status(_('number of guarded, applied patches has changed '
2327 ui.status(_('number of guarded, applied patches has changed '
2328 'from %d to %d\n') %
2328 'from %d to %d\n') %
2329 (len(old_guarded), len(guarded)))
2329 (len(old_guarded), len(guarded)))
2330 elif opts['series']:
2330 elif opts['series']:
2331 guards = {}
2331 guards = {}
2332 noguards = 0
2332 noguards = 0
2333 for gs in q.series_guards:
2333 for gs in q.series_guards:
2334 if not gs:
2334 if not gs:
2335 noguards += 1
2335 noguards += 1
2336 for g in gs:
2336 for g in gs:
2337 guards.setdefault(g, 0)
2337 guards.setdefault(g, 0)
2338 guards[g] += 1
2338 guards[g] += 1
2339 if ui.verbose:
2339 if ui.verbose:
2340 guards['NONE'] = noguards
2340 guards['NONE'] = noguards
2341 guards = guards.items()
2341 guards = guards.items()
2342 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2342 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2343 if guards:
2343 if guards:
2344 ui.note(_('guards in series file:\n'))
2344 ui.note(_('guards in series file:\n'))
2345 for guard, count in guards:
2345 for guard, count in guards:
2346 ui.note('%2d ' % count)
2346 ui.note('%2d ' % count)
2347 ui.write(guard, '\n')
2347 ui.write(guard, '\n')
2348 else:
2348 else:
2349 ui.note(_('no guards in series file\n'))
2349 ui.note(_('no guards in series file\n'))
2350 else:
2350 else:
2351 if guards:
2351 if guards:
2352 ui.note(_('active guards:\n'))
2352 ui.note(_('active guards:\n'))
2353 for g in guards:
2353 for g in guards:
2354 ui.write(g, '\n')
2354 ui.write(g, '\n')
2355 else:
2355 else:
2356 ui.write(_('no active guards\n'))
2356 ui.write(_('no active guards\n'))
2357 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2357 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2358 popped = False
2358 popped = False
2359 if opts['pop'] or opts['reapply']:
2359 if opts['pop'] or opts['reapply']:
2360 for i in xrange(len(q.applied)):
2360 for i in xrange(len(q.applied)):
2361 pushable, reason = q.pushable(i)
2361 pushable, reason = q.pushable(i)
2362 if not pushable:
2362 if not pushable:
2363 ui.status(_('popping guarded patches\n'))
2363 ui.status(_('popping guarded patches\n'))
2364 popped = True
2364 popped = True
2365 if i == 0:
2365 if i == 0:
2366 q.pop(repo, all=True)
2366 q.pop(repo, all=True)
2367 else:
2367 else:
2368 q.pop(repo, i-1)
2368 q.pop(repo, i-1)
2369 break
2369 break
2370 if popped:
2370 if popped:
2371 try:
2371 try:
2372 if reapply:
2372 if reapply:
2373 ui.status(_('reapplying unguarded patches\n'))
2373 ui.status(_('reapplying unguarded patches\n'))
2374 q.push(repo, reapply)
2374 q.push(repo, reapply)
2375 finally:
2375 finally:
2376 q.save_dirty()
2376 q.save_dirty()
2377
2377
2378 def finish(ui, repo, *revrange, **opts):
2378 def finish(ui, repo, *revrange, **opts):
2379 """move applied patches into repository history
2379 """move applied patches into repository history
2380
2380
2381 Finishes the specified revisions (corresponding to applied
2381 Finishes the specified revisions (corresponding to applied
2382 patches) by moving them out of mq control into regular repository
2382 patches) by moving them out of mq control into regular repository
2383 history.
2383 history.
2384
2384
2385 Accepts a revision range or the -a/--applied option. If --applied
2385 Accepts a revision range or the -a/--applied option. If --applied
2386 is specified, all applied mq revisions are removed from mq
2386 is specified, all applied mq revisions are removed from mq
2387 control. Otherwise, the given revisions must be at the base of the
2387 control. Otherwise, the given revisions must be at the base of the
2388 stack of applied patches.
2388 stack of applied patches.
2389
2389
2390 This can be especially useful if your changes have been applied to
2390 This can be especially useful if your changes have been applied to
2391 an upstream repository, or if you are about to push your changes
2391 an upstream repository, or if you are about to push your changes
2392 to upstream.
2392 to upstream.
2393 """
2393 """
2394 if not opts['applied'] and not revrange:
2394 if not opts['applied'] and not revrange:
2395 raise util.Abort(_('no revisions specified'))
2395 raise util.Abort(_('no revisions specified'))
2396 elif opts['applied']:
2396 elif opts['applied']:
2397 revrange = ('qbase:qtip',) + revrange
2397 revrange = ('qbase:qtip',) + revrange
2398
2398
2399 q = repo.mq
2399 q = repo.mq
2400 if not q.applied:
2400 if not q.applied:
2401 ui.status(_('no patches applied\n'))
2401 ui.status(_('no patches applied\n'))
2402 return 0
2402 return 0
2403
2403
2404 revs = cmdutil.revrange(repo, revrange)
2404 revs = cmdutil.revrange(repo, revrange)
2405 q.finish(repo, revs)
2405 q.finish(repo, revs)
2406 q.save_dirty()
2406 q.save_dirty()
2407 return 0
2407 return 0
2408
2408
2409 def reposetup(ui, repo):
2409 def reposetup(ui, repo):
2410 class mqrepo(repo.__class__):
2410 class mqrepo(repo.__class__):
2411 @util.propertycache
2411 @util.propertycache
2412 def mq(self):
2412 def mq(self):
2413 return queue(self.ui, self.join(""))
2413 return queue(self.ui, self.join(""))
2414
2414
2415 def abort_if_wdir_patched(self, errmsg, force=False):
2415 def abort_if_wdir_patched(self, errmsg, force=False):
2416 if self.mq.applied and not force:
2416 if self.mq.applied and not force:
2417 parent = hex(self.dirstate.parents()[0])
2417 parent = hex(self.dirstate.parents()[0])
2418 if parent in [s.rev for s in self.mq.applied]:
2418 if parent in [s.rev for s in self.mq.applied]:
2419 raise util.Abort(errmsg)
2419 raise util.Abort(errmsg)
2420
2420
2421 def commit(self, *args, **opts):
2421 def commit(self, *args, **opts):
2422 if len(args) >= 6:
2422 if len(args) >= 6:
2423 force = args[5]
2423 force = args[5]
2424 else:
2424 else:
2425 force = opts.get('force')
2425 force = opts.get('force')
2426 self.abort_if_wdir_patched(
2426 self.abort_if_wdir_patched(
2427 _('cannot commit over an applied mq patch'),
2427 _('cannot commit over an applied mq patch'),
2428 force)
2428 force)
2429
2429
2430 return super(mqrepo, self).commit(*args, **opts)
2430 return super(mqrepo, self).commit(*args, **opts)
2431
2431
2432 def push(self, remote, force=False, revs=None):
2432 def push(self, remote, force=False, revs=None):
2433 if self.mq.applied and not force and not revs:
2433 if self.mq.applied and not force and not revs:
2434 raise util.Abort(_('source has mq patches applied'))
2434 raise util.Abort(_('source has mq patches applied'))
2435 return super(mqrepo, self).push(remote, force, revs)
2435 return super(mqrepo, self).push(remote, force, revs)
2436
2436
2437 def tags(self):
2437 def tags(self):
2438 if self.tagscache:
2438 if self.tagscache:
2439 return self.tagscache
2439 return self.tagscache
2440
2440
2441 tagscache = super(mqrepo, self).tags()
2441 tagscache = super(mqrepo, self).tags()
2442
2442
2443 q = self.mq
2443 q = self.mq
2444 if not q.applied:
2444 if not q.applied:
2445 return tagscache
2445 return tagscache
2446
2446
2447 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2447 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2448
2448
2449 if mqtags[-1][0] not in self.changelog.nodemap:
2449 if mqtags[-1][0] not in self.changelog.nodemap:
2450 self.ui.warn(_('mq status file refers to unknown node %s\n')
2450 self.ui.warn(_('mq status file refers to unknown node %s\n')
2451 % short(mqtags[-1][0]))
2451 % short(mqtags[-1][0]))
2452 return tagscache
2452 return tagscache
2453
2453
2454 mqtags.append((mqtags[-1][0], 'qtip'))
2454 mqtags.append((mqtags[-1][0], 'qtip'))
2455 mqtags.append((mqtags[0][0], 'qbase'))
2455 mqtags.append((mqtags[0][0], 'qbase'))
2456 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2456 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2457 for patch in mqtags:
2457 for patch in mqtags:
2458 if patch[1] in tagscache:
2458 if patch[1] in tagscache:
2459 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2459 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2460 % patch[1])
2460 % patch[1])
2461 else:
2461 else:
2462 tagscache[patch[1]] = patch[0]
2462 tagscache[patch[1]] = patch[0]
2463
2463
2464 return tagscache
2464 return tagscache
2465
2465
2466 def _branchtags(self, partial, lrev):
2466 def _branchtags(self, partial, lrev):
2467 q = self.mq
2467 q = self.mq
2468 if not q.applied:
2468 if not q.applied:
2469 return super(mqrepo, self)._branchtags(partial, lrev)
2469 return super(mqrepo, self)._branchtags(partial, lrev)
2470
2470
2471 cl = self.changelog
2471 cl = self.changelog
2472 qbasenode = bin(q.applied[0].rev)
2472 qbasenode = bin(q.applied[0].rev)
2473 if qbasenode not in cl.nodemap:
2473 if qbasenode not in cl.nodemap:
2474 self.ui.warn(_('mq status file refers to unknown node %s\n')
2474 self.ui.warn(_('mq status file refers to unknown node %s\n')
2475 % short(qbasenode))
2475 % short(qbasenode))
2476 return super(mqrepo, self)._branchtags(partial, lrev)
2476 return super(mqrepo, self)._branchtags(partial, lrev)
2477
2477
2478 qbase = cl.rev(qbasenode)
2478 qbase = cl.rev(qbasenode)
2479 start = lrev + 1
2479 start = lrev + 1
2480 if start < qbase:
2480 if start < qbase:
2481 # update the cache (excluding the patches) and save it
2481 # update the cache (excluding the patches) and save it
2482 self._updatebranchcache(partial, lrev+1, qbase)
2482 self._updatebranchcache(partial, lrev+1, qbase)
2483 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2483 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2484 start = qbase
2484 start = qbase
2485 # if start = qbase, the cache is as updated as it should be.
2485 # if start = qbase, the cache is as updated as it should be.
2486 # if start > qbase, the cache includes (part of) the patches.
2486 # if start > qbase, the cache includes (part of) the patches.
2487 # we might as well use it, but we won't save it.
2487 # we might as well use it, but we won't save it.
2488
2488
2489 # update the cache up to the tip
2489 # update the cache up to the tip
2490 self._updatebranchcache(partial, start, len(cl))
2490 self._updatebranchcache(partial, start, len(cl))
2491
2491
2492 return partial
2492 return partial
2493
2493
2494 if repo.local():
2494 if repo.local():
2495 repo.__class__ = mqrepo
2495 repo.__class__ = mqrepo
2496
2496
2497 def mqimport(orig, ui, repo, *args, **kwargs):
2497 def mqimport(orig, ui, repo, *args, **kwargs):
2498 if hasattr(repo, 'abort_if_wdir_patched'):
2498 if hasattr(repo, 'abort_if_wdir_patched'):
2499 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2499 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2500 kwargs.get('force'))
2500 kwargs.get('force'))
2501 return orig(ui, repo, *args, **kwargs)
2501 return orig(ui, repo, *args, **kwargs)
2502
2502
2503 def uisetup(ui):
2503 def uisetup(ui):
2504 extensions.wrapcommand(commands.table, 'import', mqimport)
2504 extensions.wrapcommand(commands.table, 'import', mqimport)
2505
2505
2506 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2506 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2507
2507
2508 cmdtable = {
2508 cmdtable = {
2509 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2509 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2510 "qclone":
2510 "qclone":
2511 (clone,
2511 (clone,
2512 [('', 'pull', None, _('use pull protocol to copy metadata')),
2512 [('', 'pull', None, _('use pull protocol to copy metadata')),
2513 ('U', 'noupdate', None, _('do not update the new working directories')),
2513 ('U', 'noupdate', None, _('do not update the new working directories')),
2514 ('', 'uncompressed', None,
2514 ('', 'uncompressed', None,
2515 _('use uncompressed transfer (fast over LAN)')),
2515 _('use uncompressed transfer (fast over LAN)')),
2516 ('p', 'patches', '', _('location of source patch repository')),
2516 ('p', 'patches', '', _('location of source patch repository')),
2517 ] + commands.remoteopts,
2517 ] + commands.remoteopts,
2518 _('hg qclone [OPTION]... SOURCE [DEST]')),
2518 _('hg qclone [OPTION]... SOURCE [DEST]')),
2519 "qcommit|qci":
2519 "qcommit|qci":
2520 (commit,
2520 (commit,
2521 commands.table["^commit|ci"][1],
2521 commands.table["^commit|ci"][1],
2522 _('hg qcommit [OPTION]... [FILE]...')),
2522 _('hg qcommit [OPTION]... [FILE]...')),
2523 "^qdiff":
2523 "^qdiff":
2524 (diff,
2524 (diff,
2525 commands.diffopts + commands.diffopts2 + commands.walkopts,
2525 commands.diffopts + commands.diffopts2 + commands.walkopts,
2526 _('hg qdiff [OPTION]... [FILE]...')),
2526 _('hg qdiff [OPTION]... [FILE]...')),
2527 "qdelete|qremove|qrm":
2527 "qdelete|qremove|qrm":
2528 (delete,
2528 (delete,
2529 [('k', 'keep', None, _('keep patch file')),
2529 [('k', 'keep', None, _('keep patch file')),
2530 ('r', 'rev', [], _('stop managing a revision'))],
2530 ('r', 'rev', [], _('stop managing a revision'))],
2531 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2531 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2532 'qfold':
2532 'qfold':
2533 (fold,
2533 (fold,
2534 [('e', 'edit', None, _('edit patch header')),
2534 [('e', 'edit', None, _('edit patch header')),
2535 ('k', 'keep', None, _('keep folded patch files')),
2535 ('k', 'keep', None, _('keep folded patch files')),
2536 ] + commands.commitopts,
2536 ] + commands.commitopts,
2537 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2537 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2538 'qgoto':
2538 'qgoto':
2539 (goto,
2539 (goto,
2540 [('f', 'force', None, _('overwrite any local changes'))],
2540 [('f', 'force', None, _('overwrite any local changes'))],
2541 _('hg qgoto [OPTION]... PATCH')),
2541 _('hg qgoto [OPTION]... PATCH')),
2542 'qguard':
2542 'qguard':
2543 (guard,
2543 (guard,
2544 [('l', 'list', None, _('list all patches and guards')),
2544 [('l', 'list', None, _('list all patches and guards')),
2545 ('n', 'none', None, _('drop all guards'))],
2545 ('n', 'none', None, _('drop all guards'))],
2546 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2546 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2547 'qheader': (header, [], _('hg qheader [PATCH]')),
2547 'qheader': (header, [], _('hg qheader [PATCH]')),
2548 "^qimport":
2548 "^qimport":
2549 (qimport,
2549 (qimport,
2550 [('e', 'existing', None, _('import file in patch directory')),
2550 [('e', 'existing', None, _('import file in patch directory')),
2551 ('n', 'name', '', _('patch file name')),
2551 ('n', 'name', '', _('patch file name')),
2552 ('f', 'force', None, _('overwrite existing files')),
2552 ('f', 'force', None, _('overwrite existing files')),
2553 ('r', 'rev', [], _('place existing revisions under mq control')),
2553 ('r', 'rev', [], _('place existing revisions under mq control')),
2554 ('g', 'git', None, _('use git extended diff format')),
2554 ('g', 'git', None, _('use git extended diff format')),
2555 ('P', 'push', None, _('qpush after importing'))],
2555 ('P', 'push', None, _('qpush after importing'))],
2556 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2556 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2557 "^qinit":
2557 "^qinit":
2558 (init,
2558 (init,
2559 [('c', 'create-repo', None, _('create queue repository'))],
2559 [('c', 'create-repo', None, _('create queue repository'))],
2560 _('hg qinit [-c]')),
2560 _('hg qinit [-c]')),
2561 "qnew":
2561 "qnew":
2562 (new,
2562 (new,
2563 [('e', 'edit', None, _('edit commit message')),
2563 [('e', 'edit', None, _('edit commit message')),
2564 ('f', 'force', None, _('import uncommitted changes into patch')),
2564 ('f', 'force', None, _('import uncommitted changes into patch')),
2565 ('g', 'git', None, _('use git extended diff format')),
2565 ('g', 'git', None, _('use git extended diff format')),
2566 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2566 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2567 ('u', 'user', '', _('add "From: <given user>" to patch')),
2567 ('u', 'user', '', _('add "From: <given user>" to patch')),
2568 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2568 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2569 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2569 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2570 ] + commands.walkopts + commands.commitopts,
2570 ] + commands.walkopts + commands.commitopts,
2571 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2571 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2572 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2572 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2573 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2573 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2574 "^qpop":
2574 "^qpop":
2575 (pop,
2575 (pop,
2576 [('a', 'all', None, _('pop all patches')),
2576 [('a', 'all', None, _('pop all patches')),
2577 ('n', 'name', '', _('queue name to pop')),
2577 ('n', 'name', '', _('queue name to pop')),
2578 ('f', 'force', None, _('forget any local changes'))],
2578 ('f', 'force', None, _('forget any local changes'))],
2579 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2579 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2580 "^qpush":
2580 "^qpush":
2581 (push,
2581 (push,
2582 [('f', 'force', None, _('apply if the patch has rejects')),
2582 [('f', 'force', None, _('apply if the patch has rejects')),
2583 ('l', 'list', None, _('list patch name in commit text')),
2583 ('l', 'list', None, _('list patch name in commit text')),
2584 ('a', 'all', None, _('apply all patches')),
2584 ('a', 'all', None, _('apply all patches')),
2585 ('m', 'merge', None, _('merge from another queue')),
2585 ('m', 'merge', None, _('merge from another queue')),
2586 ('n', 'name', '', _('merge queue name'))],
2586 ('n', 'name', '', _('merge queue name'))],
2587 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2587 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2588 "^qrefresh":
2588 "^qrefresh":
2589 (refresh,
2589 (refresh,
2590 [('e', 'edit', None, _('edit commit message')),
2590 [('e', 'edit', None, _('edit commit message')),
2591 ('g', 'git', None, _('use git extended diff format')),
2591 ('g', 'git', None, _('use git extended diff format')),
2592 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2592 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2593 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2593 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2594 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2594 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2595 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2595 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2596 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2596 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2597 ] + commands.walkopts + commands.commitopts,
2597 ] + commands.walkopts + commands.commitopts,
2598 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2598 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2599 'qrename|qmv':
2599 'qrename|qmv':
2600 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2600 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2601 "qrestore":
2601 "qrestore":
2602 (restore,
2602 (restore,
2603 [('d', 'delete', None, _('delete save entry')),
2603 [('d', 'delete', None, _('delete save entry')),
2604 ('u', 'update', None, _('update queue working directory'))],
2604 ('u', 'update', None, _('update queue working directory'))],
2605 _('hg qrestore [-d] [-u] REV')),
2605 _('hg qrestore [-d] [-u] REV')),
2606 "qsave":
2606 "qsave":
2607 (save,
2607 (save,
2608 [('c', 'copy', None, _('copy patch directory')),
2608 [('c', 'copy', None, _('copy patch directory')),
2609 ('n', 'name', '', _('copy directory name')),
2609 ('n', 'name', '', _('copy directory name')),
2610 ('e', 'empty', None, _('clear queue status file')),
2610 ('e', 'empty', None, _('clear queue status file')),
2611 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2611 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2612 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2612 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2613 "qselect":
2613 "qselect":
2614 (select,
2614 (select,
2615 [('n', 'none', None, _('disable all guards')),
2615 [('n', 'none', None, _('disable all guards')),
2616 ('s', 'series', None, _('list all guards in series file')),
2616 ('s', 'series', None, _('list all guards in series file')),
2617 ('', 'pop', None, _('pop to before first guarded applied patch')),
2617 ('', 'pop', None, _('pop to before first guarded applied patch')),
2618 ('', 'reapply', None, _('pop, then reapply patches'))],
2618 ('', 'reapply', None, _('pop, then reapply patches'))],
2619 _('hg qselect [OPTION]... [GUARD]...')),
2619 _('hg qselect [OPTION]... [GUARD]...')),
2620 "qseries":
2620 "qseries":
2621 (series,
2621 (series,
2622 [('m', 'missing', None, _('print patches not in series')),
2622 [('m', 'missing', None, _('print patches not in series')),
2623 ] + seriesopts,
2623 ] + seriesopts,
2624 _('hg qseries [-ms]')),
2624 _('hg qseries [-ms]')),
2625 "^strip":
2625 "^strip":
2626 (strip,
2626 (strip,
2627 [('f', 'force', None, _('force removal with local changes')),
2627 [('f', 'force', None, _('force removal with local changes')),
2628 ('b', 'backup', None, _('bundle unrelated changesets')),
2628 ('b', 'backup', None, _('bundle unrelated changesets')),
2629 ('n', 'nobackup', None, _('no backups'))],
2629 ('n', 'nobackup', None, _('no backups'))],
2630 _('hg strip [-f] [-b] [-n] REV')),
2630 _('hg strip [-f] [-b] [-n] REV')),
2631 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2631 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2632 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2632 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2633 "qfinish":
2633 "qfinish":
2634 (finish,
2634 (finish,
2635 [('a', 'applied', None, _('finish all applied changesets'))],
2635 [('a', 'applied', None, _('finish all applied changesets'))],
2636 _('hg qfinish [-a] [REV...]')),
2636 _('hg qfinish [-a] [REV...]')),
2637 }
2637 }
@@ -1,145 +1,145 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2, incorporated herein by reference.
7 # GNU General Public License version 2, incorporated herein by reference.
8
8
9 import changegroup
9 import changegroup
10 from node import nullrev, short
10 from node import nullrev, short
11 from i18n import _
11 from i18n import _
12 import os
12 import os
13
13
14 def _bundle(repo, bases, heads, node, suffix, extranodes=None):
14 def _bundle(repo, bases, heads, node, suffix, extranodes=None):
15 """create a bundle with the specified revisions as a backup"""
15 """create a bundle with the specified revisions as a backup"""
16 cg = repo.changegroupsubset(bases, heads, 'strip', extranodes)
16 cg = repo.changegroupsubset(bases, heads, 'strip', extranodes)
17 backupdir = repo.join("strip-backup")
17 backupdir = repo.join("strip-backup")
18 if not os.path.isdir(backupdir):
18 if not os.path.isdir(backupdir):
19 os.mkdir(backupdir)
19 os.mkdir(backupdir)
20 name = os.path.join(backupdir, "%s-%s" % (short(node), suffix))
20 name = os.path.join(backupdir, "%s-%s" % (short(node), suffix))
21 repo.ui.warn(_("saving bundle to %s\n") % name)
21 repo.ui.warn(_("saving bundle to %s\n") % name)
22 return changegroup.writebundle(cg, name, "HG10BZ")
22 return changegroup.writebundle(cg, name, "HG10BZ")
23
23
24 def _collectfiles(repo, striprev):
24 def _collectfiles(repo, striprev):
25 """find out the filelogs affected by the strip"""
25 """find out the filelogs affected by the strip"""
26 files = set()
26 files = set()
27
27
28 for x in xrange(striprev, len(repo)):
28 for x in xrange(striprev, len(repo)):
29 files.update(repo[x].files())
29 files.update(repo[x].files())
30
30
31 return sorted(files)
31 return sorted(files)
32
32
33 def _collectextranodes(repo, files, link):
33 def _collectextranodes(repo, files, link):
34 """return the nodes that have to be saved before the strip"""
34 """return the nodes that have to be saved before the strip"""
35 def collectone(revlog):
35 def collectone(revlog):
36 extra = []
36 extra = []
37 startrev = count = len(revlog)
37 startrev = count = len(revlog)
38 # find the truncation point of the revlog
38 # find the truncation point of the revlog
39 for i in xrange(0, count):
39 for i in xrange(count):
40 lrev = revlog.linkrev(i)
40 lrev = revlog.linkrev(i)
41 if lrev >= link:
41 if lrev >= link:
42 startrev = i + 1
42 startrev = i + 1
43 break
43 break
44
44
45 # see if any revision after that point has a linkrev less than link
45 # see if any revision after that point has a linkrev less than link
46 # (we have to manually save these guys)
46 # (we have to manually save these guys)
47 for i in xrange(startrev, count):
47 for i in xrange(startrev, count):
48 node = revlog.node(i)
48 node = revlog.node(i)
49 lrev = revlog.linkrev(i)
49 lrev = revlog.linkrev(i)
50 if lrev < link:
50 if lrev < link:
51 extra.append((node, cl.node(lrev)))
51 extra.append((node, cl.node(lrev)))
52
52
53 return extra
53 return extra
54
54
55 extranodes = {}
55 extranodes = {}
56 cl = repo.changelog
56 cl = repo.changelog
57 extra = collectone(repo.manifest)
57 extra = collectone(repo.manifest)
58 if extra:
58 if extra:
59 extranodes[1] = extra
59 extranodes[1] = extra
60 for fname in files:
60 for fname in files:
61 f = repo.file(fname)
61 f = repo.file(fname)
62 extra = collectone(f)
62 extra = collectone(f)
63 if extra:
63 if extra:
64 extranodes[fname] = extra
64 extranodes[fname] = extra
65
65
66 return extranodes
66 return extranodes
67
67
68 def strip(ui, repo, node, backup="all"):
68 def strip(ui, repo, node, backup="all"):
69 cl = repo.changelog
69 cl = repo.changelog
70 # TODO delete the undo files, and handle undo of merge sets
70 # TODO delete the undo files, and handle undo of merge sets
71 striprev = cl.rev(node)
71 striprev = cl.rev(node)
72
72
73 # Some revisions with rev > striprev may not be descendants of striprev.
73 # Some revisions with rev > striprev may not be descendants of striprev.
74 # We have to find these revisions and put them in a bundle, so that
74 # We have to find these revisions and put them in a bundle, so that
75 # we can restore them after the truncations.
75 # we can restore them after the truncations.
76 # To create the bundle we use repo.changegroupsubset which requires
76 # To create the bundle we use repo.changegroupsubset which requires
77 # the list of heads and bases of the set of interesting revisions.
77 # the list of heads and bases of the set of interesting revisions.
78 # (head = revision in the set that has no descendant in the set;
78 # (head = revision in the set that has no descendant in the set;
79 # base = revision in the set that has no ancestor in the set)
79 # base = revision in the set that has no ancestor in the set)
80 tostrip = set((striprev,))
80 tostrip = set((striprev,))
81 saveheads = set()
81 saveheads = set()
82 savebases = []
82 savebases = []
83 for r in xrange(striprev + 1, len(cl)):
83 for r in xrange(striprev + 1, len(cl)):
84 parents = cl.parentrevs(r)
84 parents = cl.parentrevs(r)
85 if parents[0] in tostrip or parents[1] in tostrip:
85 if parents[0] in tostrip or parents[1] in tostrip:
86 # r is a descendant of striprev
86 # r is a descendant of striprev
87 tostrip.add(r)
87 tostrip.add(r)
88 # if this is a merge and one of the parents does not descend
88 # if this is a merge and one of the parents does not descend
89 # from striprev, mark that parent as a savehead.
89 # from striprev, mark that parent as a savehead.
90 if parents[1] != nullrev:
90 if parents[1] != nullrev:
91 for p in parents:
91 for p in parents:
92 if p not in tostrip and p > striprev:
92 if p not in tostrip and p > striprev:
93 saveheads.add(p)
93 saveheads.add(p)
94 else:
94 else:
95 # if no parents of this revision will be stripped, mark it as
95 # if no parents of this revision will be stripped, mark it as
96 # a savebase
96 # a savebase
97 if parents[0] < striprev and parents[1] < striprev:
97 if parents[0] < striprev and parents[1] < striprev:
98 savebases.append(cl.node(r))
98 savebases.append(cl.node(r))
99
99
100 saveheads.difference_update(parents)
100 saveheads.difference_update(parents)
101 saveheads.add(r)
101 saveheads.add(r)
102
102
103 saveheads = [cl.node(r) for r in saveheads]
103 saveheads = [cl.node(r) for r in saveheads]
104 files = _collectfiles(repo, striprev)
104 files = _collectfiles(repo, striprev)
105
105
106 extranodes = _collectextranodes(repo, files, striprev)
106 extranodes = _collectextranodes(repo, files, striprev)
107
107
108 # create a changegroup for all the branches we need to keep
108 # create a changegroup for all the branches we need to keep
109 if backup == "all":
109 if backup == "all":
110 _bundle(repo, [node], cl.heads(), node, 'backup')
110 _bundle(repo, [node], cl.heads(), node, 'backup')
111 if saveheads or extranodes:
111 if saveheads or extranodes:
112 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
112 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
113 extranodes)
113 extranodes)
114
114
115 fs = [repo.file(name) for name in files]
115 fs = [repo.file(name) for name in files]
116 mfst = repo.manifest
116 mfst = repo.manifest
117
117
118 tr = repo.transaction()
118 tr = repo.transaction()
119 offset = len(tr.entries)
119 offset = len(tr.entries)
120
120
121 tr.startgroup()
121 tr.startgroup()
122 cl.strip(striprev, tr)
122 cl.strip(striprev, tr)
123 mfst.strip(striprev, tr)
123 mfst.strip(striprev, tr)
124 for f in fs:
124 for f in fs:
125 f.strip(striprev, tr)
125 f.strip(striprev, tr)
126 tr.endgroup()
126 tr.endgroup()
127
127
128 try:
128 try:
129 for i in xrange(offset, len(tr.entries)):
129 for i in xrange(offset, len(tr.entries)):
130 file, troffset, ignore = tr.entries[i]
130 file, troffset, ignore = tr.entries[i]
131 repo.sopener(file, 'a').truncate(troffset)
131 repo.sopener(file, 'a').truncate(troffset)
132 tr.close()
132 tr.close()
133 except:
133 except:
134 tr.abort()
134 tr.abort()
135 raise
135 raise
136
136
137 if saveheads or extranodes:
137 if saveheads or extranodes:
138 ui.status(_("adding branch\n"))
138 ui.status(_("adding branch\n"))
139 f = open(chgrpfile, "rb")
139 f = open(chgrpfile, "rb")
140 gen = changegroup.readbundle(f, chgrpfile)
140 gen = changegroup.readbundle(f, chgrpfile)
141 repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
141 repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
142 f.close()
142 f.close()
143 if backup != "strip":
143 if backup != "strip":
144 os.unlink(chgrpfile)
144 os.unlink(chgrpfile)
145
145
@@ -1,1390 +1,1390 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 # import stuff from node for others to import from revlog
14 # import stuff from node for others to import from revlog
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
16 from i18n import _
16 from i18n import _
17 import changegroup, ancestor, mdiff, parsers, error, util
17 import changegroup, ancestor, mdiff, parsers, error, util
18 import struct, zlib, errno
18 import struct, zlib, errno
19
19
20 _pack = struct.pack
20 _pack = struct.pack
21 _unpack = struct.unpack
21 _unpack = struct.unpack
22 _compress = zlib.compress
22 _compress = zlib.compress
23 _decompress = zlib.decompress
23 _decompress = zlib.decompress
24 _sha = util.sha1
24 _sha = util.sha1
25
25
26 # revlog flags
26 # revlog flags
27 REVLOGV0 = 0
27 REVLOGV0 = 0
28 REVLOGNG = 1
28 REVLOGNG = 1
29 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOGNGINLINEDATA = (1 << 16)
30 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
30 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
31 REVLOG_DEFAULT_FORMAT = REVLOGNG
31 REVLOG_DEFAULT_FORMAT = REVLOGNG
32 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
32 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
33
33
34 _prereadsize = 1048576
34 _prereadsize = 1048576
35
35
36 RevlogError = error.RevlogError
36 RevlogError = error.RevlogError
37 LookupError = error.LookupError
37 LookupError = error.LookupError
38
38
39 def getoffset(q):
39 def getoffset(q):
40 return int(q >> 16)
40 return int(q >> 16)
41
41
42 def gettype(q):
42 def gettype(q):
43 return int(q & 0xFFFF)
43 return int(q & 0xFFFF)
44
44
45 def offset_type(offset, type):
45 def offset_type(offset, type):
46 return long(long(offset) << 16 | type)
46 return long(long(offset) << 16 | type)
47
47
48 nullhash = _sha(nullid)
48 nullhash = _sha(nullid)
49
49
50 def hash(text, p1, p2):
50 def hash(text, p1, p2):
51 """generate a hash from the given text and its parent hashes
51 """generate a hash from the given text and its parent hashes
52
52
53 This hash combines both the current file contents and its history
53 This hash combines both the current file contents and its history
54 in a manner that makes it easy to distinguish nodes with the same
54 in a manner that makes it easy to distinguish nodes with the same
55 content in the revision graph.
55 content in the revision graph.
56 """
56 """
57 # As of now, if one of the parent node is null, p2 is null
57 # As of now, if one of the parent node is null, p2 is null
58 if p2 == nullid:
58 if p2 == nullid:
59 # deep copy of a hash is faster than creating one
59 # deep copy of a hash is faster than creating one
60 s = nullhash.copy()
60 s = nullhash.copy()
61 s.update(p1)
61 s.update(p1)
62 else:
62 else:
63 # none of the parent nodes are nullid
63 # none of the parent nodes are nullid
64 l = [p1, p2]
64 l = [p1, p2]
65 l.sort()
65 l.sort()
66 s = _sha(l[0])
66 s = _sha(l[0])
67 s.update(l[1])
67 s.update(l[1])
68 s.update(text)
68 s.update(text)
69 return s.digest()
69 return s.digest()
70
70
71 def compress(text):
71 def compress(text):
72 """ generate a possibly-compressed representation of text """
72 """ generate a possibly-compressed representation of text """
73 if not text:
73 if not text:
74 return ("", text)
74 return ("", text)
75 l = len(text)
75 l = len(text)
76 bin = None
76 bin = None
77 if l < 44:
77 if l < 44:
78 pass
78 pass
79 elif l > 1000000:
79 elif l > 1000000:
80 # zlib makes an internal copy, thus doubling memory usage for
80 # zlib makes an internal copy, thus doubling memory usage for
81 # large files, so lets do this in pieces
81 # large files, so lets do this in pieces
82 z = zlib.compressobj()
82 z = zlib.compressobj()
83 p = []
83 p = []
84 pos = 0
84 pos = 0
85 while pos < l:
85 while pos < l:
86 pos2 = pos + 2**20
86 pos2 = pos + 2**20
87 p.append(z.compress(text[pos:pos2]))
87 p.append(z.compress(text[pos:pos2]))
88 pos = pos2
88 pos = pos2
89 p.append(z.flush())
89 p.append(z.flush())
90 if sum(map(len, p)) < l:
90 if sum(map(len, p)) < l:
91 bin = "".join(p)
91 bin = "".join(p)
92 else:
92 else:
93 bin = _compress(text)
93 bin = _compress(text)
94 if bin is None or len(bin) > l:
94 if bin is None or len(bin) > l:
95 if text[0] == '\0':
95 if text[0] == '\0':
96 return ("", text)
96 return ("", text)
97 return ('u', text)
97 return ('u', text)
98 return ("", bin)
98 return ("", bin)
99
99
100 def decompress(bin):
100 def decompress(bin):
101 """ decompress the given input """
101 """ decompress the given input """
102 if not bin:
102 if not bin:
103 return bin
103 return bin
104 t = bin[0]
104 t = bin[0]
105 if t == '\0':
105 if t == '\0':
106 return bin
106 return bin
107 if t == 'x':
107 if t == 'x':
108 return _decompress(bin)
108 return _decompress(bin)
109 if t == 'u':
109 if t == 'u':
110 return bin[1:]
110 return bin[1:]
111 raise RevlogError(_("unknown compression type %r") % t)
111 raise RevlogError(_("unknown compression type %r") % t)
112
112
113 class lazyparser(object):
113 class lazyparser(object):
114 """
114 """
115 this class avoids the need to parse the entirety of large indices
115 this class avoids the need to parse the entirety of large indices
116 """
116 """
117
117
118 # lazyparser is not safe to use on windows if win32 extensions not
118 # lazyparser is not safe to use on windows if win32 extensions not
119 # available. it keeps file handle open, which make it not possible
119 # available. it keeps file handle open, which make it not possible
120 # to break hardlinks on local cloned repos.
120 # to break hardlinks on local cloned repos.
121
121
122 def __init__(self, dataf, size):
122 def __init__(self, dataf, size):
123 self.dataf = dataf
123 self.dataf = dataf
124 self.s = struct.calcsize(indexformatng)
124 self.s = struct.calcsize(indexformatng)
125 self.datasize = size
125 self.datasize = size
126 self.l = size/self.s
126 self.l = size/self.s
127 self.index = [None] * self.l
127 self.index = [None] * self.l
128 self.map = {nullid: nullrev}
128 self.map = {nullid: nullrev}
129 self.allmap = 0
129 self.allmap = 0
130 self.all = 0
130 self.all = 0
131 self.mapfind_count = 0
131 self.mapfind_count = 0
132
132
133 def loadmap(self):
133 def loadmap(self):
134 """
134 """
135 during a commit, we need to make sure the rev being added is
135 during a commit, we need to make sure the rev being added is
136 not a duplicate. This requires loading the entire index,
136 not a duplicate. This requires loading the entire index,
137 which is fairly slow. loadmap can load up just the node map,
137 which is fairly slow. loadmap can load up just the node map,
138 which takes much less time.
138 which takes much less time.
139 """
139 """
140 if self.allmap:
140 if self.allmap:
141 return
141 return
142 end = self.datasize
142 end = self.datasize
143 self.allmap = 1
143 self.allmap = 1
144 cur = 0
144 cur = 0
145 count = 0
145 count = 0
146 blocksize = self.s * 256
146 blocksize = self.s * 256
147 self.dataf.seek(0)
147 self.dataf.seek(0)
148 while cur < end:
148 while cur < end:
149 data = self.dataf.read(blocksize)
149 data = self.dataf.read(blocksize)
150 off = 0
150 off = 0
151 for x in xrange(256):
151 for x in xrange(256):
152 n = data[off + ngshaoffset:off + ngshaoffset + 20]
152 n = data[off + ngshaoffset:off + ngshaoffset + 20]
153 self.map[n] = count
153 self.map[n] = count
154 count += 1
154 count += 1
155 if count >= self.l:
155 if count >= self.l:
156 break
156 break
157 off += self.s
157 off += self.s
158 cur += blocksize
158 cur += blocksize
159
159
160 def loadblock(self, blockstart, blocksize, data=None):
160 def loadblock(self, blockstart, blocksize, data=None):
161 if self.all:
161 if self.all:
162 return
162 return
163 if data is None:
163 if data is None:
164 self.dataf.seek(blockstart)
164 self.dataf.seek(blockstart)
165 if blockstart + blocksize > self.datasize:
165 if blockstart + blocksize > self.datasize:
166 # the revlog may have grown since we've started running,
166 # the revlog may have grown since we've started running,
167 # but we don't have space in self.index for more entries.
167 # but we don't have space in self.index for more entries.
168 # limit blocksize so that we don't get too much data.
168 # limit blocksize so that we don't get too much data.
169 blocksize = max(self.datasize - blockstart, 0)
169 blocksize = max(self.datasize - blockstart, 0)
170 data = self.dataf.read(blocksize)
170 data = self.dataf.read(blocksize)
171 lend = len(data) / self.s
171 lend = len(data) / self.s
172 i = blockstart / self.s
172 i = blockstart / self.s
173 off = 0
173 off = 0
174 # lazyindex supports __delitem__
174 # lazyindex supports __delitem__
175 if lend > len(self.index) - i:
175 if lend > len(self.index) - i:
176 lend = len(self.index) - i
176 lend = len(self.index) - i
177 for x in xrange(lend):
177 for x in xrange(lend):
178 if self.index[i + x] is None:
178 if self.index[i + x] is None:
179 b = data[off : off + self.s]
179 b = data[off : off + self.s]
180 self.index[i + x] = b
180 self.index[i + x] = b
181 n = b[ngshaoffset:ngshaoffset + 20]
181 n = b[ngshaoffset:ngshaoffset + 20]
182 self.map[n] = i + x
182 self.map[n] = i + x
183 off += self.s
183 off += self.s
184
184
185 def findnode(self, node):
185 def findnode(self, node):
186 """search backwards through the index file for a specific node"""
186 """search backwards through the index file for a specific node"""
187 if self.allmap:
187 if self.allmap:
188 return None
188 return None
189
189
190 # hg log will cause many many searches for the manifest
190 # hg log will cause many many searches for the manifest
191 # nodes. After we get called a few times, just load the whole
191 # nodes. After we get called a few times, just load the whole
192 # thing.
192 # thing.
193 if self.mapfind_count > 8:
193 if self.mapfind_count > 8:
194 self.loadmap()
194 self.loadmap()
195 if node in self.map:
195 if node in self.map:
196 return node
196 return node
197 return None
197 return None
198 self.mapfind_count += 1
198 self.mapfind_count += 1
199 last = self.l - 1
199 last = self.l - 1
200 while self.index[last] != None:
200 while self.index[last] != None:
201 if last == 0:
201 if last == 0:
202 self.all = 1
202 self.all = 1
203 self.allmap = 1
203 self.allmap = 1
204 return None
204 return None
205 last -= 1
205 last -= 1
206 end = (last + 1) * self.s
206 end = (last + 1) * self.s
207 blocksize = self.s * 256
207 blocksize = self.s * 256
208 while end >= 0:
208 while end >= 0:
209 start = max(end - blocksize, 0)
209 start = max(end - blocksize, 0)
210 self.dataf.seek(start)
210 self.dataf.seek(start)
211 data = self.dataf.read(end - start)
211 data = self.dataf.read(end - start)
212 findend = end - start
212 findend = end - start
213 while True:
213 while True:
214 # we're searching backwards, so we have to make sure
214 # we're searching backwards, so we have to make sure
215 # we don't find a changeset where this node is a parent
215 # we don't find a changeset where this node is a parent
216 off = data.find(node, 0, findend)
216 off = data.find(node, 0, findend)
217 findend = off
217 findend = off
218 if off >= 0:
218 if off >= 0:
219 i = off / self.s
219 i = off / self.s
220 off = i * self.s
220 off = i * self.s
221 n = data[off + ngshaoffset:off + ngshaoffset + 20]
221 n = data[off + ngshaoffset:off + ngshaoffset + 20]
222 if n == node:
222 if n == node:
223 self.map[n] = i + start / self.s
223 self.map[n] = i + start / self.s
224 return node
224 return node
225 else:
225 else:
226 break
226 break
227 end -= blocksize
227 end -= blocksize
228 return None
228 return None
229
229
    def loadindex(self, i=None, end=None):
        """Load index entries from disk.

        With no arguments, load the entire index.  With i and end, load
        entries i through end.  With only i, load the 1024-entry block
        containing entry i.
        """
        if self.all:
            return
        all = False
        if i is None:
            # whole index: read in 64k-aligned chunks
            blockstart = 0
            blocksize = (65536 / self.s) * self.s
            end = self.datasize
            all = True
        else:
            if end:
                # explicit entry range -> one block covering it
                blockstart = i * self.s
                end = end * self.s
                blocksize = end - blockstart
            else:
                # single entry -> its surrounding 1024-entry block
                blockstart = (i & ~1023) * self.s
                blocksize = self.s * 1024
                end = blockstart + blocksize
        while blockstart < end:
            self.loadblock(blockstart, blocksize)
            blockstart += blocksize
        if all:
            self.all = True
253
253
class lazyindex(object):
    """a lazy version of the index array

    Entries live inside the parser as packed byte strings and are only
    unpacked (or read from disk) when subscripted.
    """
    def __init__(self, parser):
        self.p = parser
    def __len__(self):
        return len(self.p.index)
    def load(self, pos):
        # normalize a negative position, pull the entry's block into
        # memory, and return the packed bytes
        if pos < 0:
            pos += len(self.p.index)
        self.p.loadindex(pos)
        return self.p.index[pos]
    def __getitem__(self, pos):
        packed = self.p.index[pos] or self.load(pos)
        return _unpack(indexformatng, packed)
    def __setitem__(self, pos, item):
        packed = _pack(indexformatng, *item)
        self.p.index[pos] = packed
    def __delitem__(self, pos):
        del self.p.index[pos]
    def insert(self, pos, e):
        packed = _pack(indexformatng, *e)
        self.p.index.insert(pos, packed)
    def append(self, e):
        packed = _pack(indexformatng, *e)
        self.p.index.append(packed)
275
275
class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        # scan the on-disk index for the node; findnode records the
        # revision in self.p.map as a side effect when it succeeds
        n = self.p.findnode(key)
        if n is None:
            raise KeyError(key)
    def __contains__(self, key):
        if key in self.p.map:
            return True
        # unknown so far: force the full map before answering no
        self.p.loadmap()
        return key in self.p.map
    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            ret = self.p.index[i]
            if not ret:
                # entry not loaded yet; pull in its block
                self.p.loadindex(i)
                ret = self.p.index[i]
            if isinstance(ret, str):
                # still packed bytes: unpack into the index tuple
                ret = _unpack(indexformatng, ret)
            # field 7 of an unpacked entry is the nodeid
            yield ret[7]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]
312
312
# original (v0) index entry: four longs (offset, length, base rev,
# link rev) followed by three 20-byte hashes (parent 1, parent 2, nodeid)
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56  # byte offset of the nodeid within a packed v0 entry
315
315
class revlogoldio(object):
    """reads and writes index entries in the original (v0) format"""
    def __init__(self):
        # size in bytes of one packed v0 index entry
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, fp, data, inline):
        """parse all index data, returning (index, nodemap, None)

        Each v0 entry is converted to the revlogv1 in-memory tuple
        layout so the rest of the code handles a single format.
        """
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        if len(data) == _prereadsize:
            data += fp.read() # read the rest
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        """pack a v1-layout entry tuple back into v0 bytes

        node is a callable mapping a revision number to its nodeid;
        it converts the tuple's parent revisions into hashes.
        """
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)
345
345
# index ng:
#  6 bytes offset
#  2 bytes flags
#  4 bytes compressed length
#  4 bytes uncompressed length
#  4 bytes: base rev
#  4 bytes link rev
#  4 bytes parent 1 rev
#  4 bytes parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32  # byte offset of the nodeid within a packed ng entry
versionformat = ">I"  # the version word stored in the first 4 bytes
359
359
class revlogio(object):
    """reads and writes index entries in the current (ng) format"""
    def __init__(self):
        # size in bytes of one packed ng index entry
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, fp, data, inline):
        """parse index data, returning (index, nodemap, cache)

        A large non-inline index is parsed on demand via the lazy
        classes; otherwise the C parser handles everything at once.
        """
        size = len(data)
        if size == _prereadsize:
            if util.openhardlinks() and not inline:
                # data was truncated at _prereadsize; find the real size
                try:
                    size = util.fstat(fp).st_size
                except AttributeError:
                    size = 0
                # big index, let's parse it on demand
                parser = lazyparser(fp, size)
                index = lazyindex(parser)
                nodemap = lazymap(parser)
                # rewrite entry 0 so its offset field is 0 but the
                # type/flags bits are preserved
                e = list(index[0])
                type = gettype(e[0])
                e[0] = offset_type(0, type)
                index[0] = e
                return index, nodemap, None
            else:
                data += fp.read()

        # call the C implementation to parse the index data
        index, nodemap, cache = parsers.parse_index(data, inline)
        return index, nodemap, cache

    def packentry(self, entry, node, version, rev):
        """pack an entry tuple into ng bytes; the first entry also
        carries the revlog version word in its leading 4 bytes"""
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p
393
393
394 class revlog(object):
394 class revlog(object):
395 """
395 """
396 the underlying revision storage object
396 the underlying revision storage object
397
397
398 A revlog consists of two parts, an index and the revision data.
398 A revlog consists of two parts, an index and the revision data.
399
399
400 The index is a file with a fixed record size containing
400 The index is a file with a fixed record size containing
401 information on each revision, including its nodeid (hash), the
401 information on each revision, including its nodeid (hash), the
402 nodeids of its parents, the position and offset of its data within
402 nodeids of its parents, the position and offset of its data within
403 the data file, and the revision it's based on. Finally, each entry
403 the data file, and the revision it's based on. Finally, each entry
404 contains a linkrev entry that can serve as a pointer to external
404 contains a linkrev entry that can serve as a pointer to external
405 data.
405 data.
406
406
407 The revision data itself is a linear collection of data chunks.
407 The revision data itself is a linear collection of data chunks.
408 Each chunk represents a revision and is usually represented as a
408 Each chunk represents a revision and is usually represented as a
409 delta against the previous chunk. To bound lookup time, runs of
409 delta against the previous chunk. To bound lookup time, runs of
410 deltas are limited to about 2 times the length of the original
410 deltas are limited to about 2 times the length of the original
411 version data. This makes retrieval of a version proportional to
411 version data. This makes retrieval of a version proportional to
412 its size, or O(1) relative to the number of revisions.
412 its size, or O(1) relative to the number of revisions.
413
413
414 Both pieces of the revlog are written to in an append-only
414 Both pieces of the revlog are written to in an append-only
415 fashion, which means we never need to rewrite a file to insert or
415 fashion, which means we never need to rewrite a file to insert or
416 remove data, and can use some simple techniques to avoid the need
416 remove data, and can use some simple techniques to avoid the need
417 for locking while reading.
417 for locking while reading.
418 """
418 """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None                     # (node, rev, text) of last revision
        self._chunkcache = (0, '')             # (offset, data) read-ahead cache
        self.nodemap = {nullid: nullrev}
        self.index = []

        # default version, possibly overridden by the opener
        v = REVLOG_DEFAULT_VERSION
        if hasattr(opener, "defversion"):
            v = opener.defversion
        if v & REVLOGNG:
            v |= REVLOGNGINLINEDATA

        # read the start of the index; a missing file is not an error
        i = ''
        try:
            f = self.opener(self.indexfile)
            i = f.read(_prereadsize)
            if len(i) > 0:
                v = struct.unpack(versionformat, i[:4])[0]
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        # validate the version word: low 16 bits are the format,
        # the rest are flags
        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        # pick the parser/packer matching the on-disk format
        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        if i:
            try:
                d = self._io.parseindex(f, i, self._inline)
            except (ValueError, IndexError), e:
                raise RevlogError(_("index %s is corrupted") % (self.indexfile))
            self.index, self.nodemap, self._chunkcache = d
            if not self._chunkcache:
                self._chunkcache = (0, '')

        # add the magic null revision at -1 (if it hasn't been done already)
        if (self.index == [] or isinstance(self.index, lazyindex) or
            self.index[-1][7] != nullid) :
            self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
480
480
481 def _loadindex(self, start, end):
481 def _loadindex(self, start, end):
482 """load a block of indexes all at once from the lazy parser"""
482 """load a block of indexes all at once from the lazy parser"""
483 if isinstance(self.index, lazyindex):
483 if isinstance(self.index, lazyindex):
484 self.index.p.loadindex(start, end)
484 self.index.p.loadindex(start, end)
485
485
486 def _loadindexmap(self):
486 def _loadindexmap(self):
487 """loads both the map and the index from the lazy parser"""
487 """loads both the map and the index from the lazy parser"""
488 if isinstance(self.index, lazyindex):
488 if isinstance(self.index, lazyindex):
489 p = self.index.p
489 p = self.index.p
490 p.loadindex()
490 p.loadindex()
491 self.nodemap = p.map
491 self.nodemap = p.map
492
492
493 def _loadmap(self):
493 def _loadmap(self):
494 """loads the map from the lazy parser"""
494 """loads the map from the lazy parser"""
495 if isinstance(self.nodemap, lazymap):
495 if isinstance(self.nodemap, lazymap):
496 self.nodemap.p.loadmap()
496 self.nodemap.p.loadmap()
497 self.nodemap = self.nodemap.p.map
497 self.nodemap = self.nodemap.p.map
498
498
    # Each index entry is an 8-tuple:
    #   (offset|flags, compressed length, uncompressed length, base rev,
    #    link rev, parent 1 rev, parent 2 rev, nodeid)
    def tip(self):
        # -2 skips the sentinel nullid entry appended at the end
        return self.node(len(self.index) - 2)
    def __len__(self):
        # number of real revisions (index holds one extra nullid entry)
        return len(self.index) - 1
    def __iter__(self):
        for i in xrange(len(self)):
            yield i
    def rev(self, node):
        # nodeid -> revision number
        try:
            return self.nodemap[node]
        except KeyError:
            raise LookupError(node, self.indexfile, _('no node'))
    def node(self, rev):
        # revision number -> nodeid (tuple field 7)
        return self.index[rev][7]
    def linkrev(self, rev):
        # tuple field 4 is the link revision
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        # (parent 1 rev, parent 2 rev)
        return self.index[rev][5:7]
    def start(self, rev):
        # field 0 packs (offset << 16) | flags; shift drops the flags
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        # compressed length of this revision's data chunk
        return self.index[rev][1]
    def base(self, rev):
        # revision this delta chain is based on
        return self.index[rev][3]
529
529
    def size(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        # stored length is unknown (-1): reconstruct the full text
        # and measure it
        t = self.revision(self.node(rev))
        return len(t)

        # alternate implementation, The advantage to this code is it
        # will be faster for a single revision. But, the results are not
        # cached, so finding the size of every revision will be slower.
        """
        if self.cache and self.cache[1] == rev:
            return len(self.cache[2])

        base = self.base(rev)
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            text = self.cache[2]
        else:
            text = self.revision(self.node(base))

        l = len(text)
        for x in xrange(base + 1, rev + 1):
            l = mdiff.patchedsize(l, self.chunk(x))
        return l
        """
558
558
559 def reachable(self, node, stop=None):
559 def reachable(self, node, stop=None):
560 """return the set of all nodes ancestral to a given node, including
560 """return the set of all nodes ancestral to a given node, including
561 the node itself, stopping when stop is matched"""
561 the node itself, stopping when stop is matched"""
562 reachable = set((node,))
562 reachable = set((node,))
563 visit = [node]
563 visit = [node]
564 if stop:
564 if stop:
565 stopn = self.rev(stop)
565 stopn = self.rev(stop)
566 else:
566 else:
567 stopn = 0
567 stopn = 0
568 while visit:
568 while visit:
569 n = visit.pop(0)
569 n = visit.pop(0)
570 if n == stop:
570 if n == stop:
571 continue
571 continue
572 if n == nullid:
572 if n == nullid:
573 continue
573 continue
574 for p in self.parents(n):
574 for p in self.parents(n):
575 if self.rev(p) < stopn:
575 if self.rev(p) < stopn:
576 continue
576 continue
577 if p not in reachable:
577 if p not in reachable:
578 reachable.add(p)
578 reachable.add(p)
579 visit.append(p)
579 visit.append(p)
580 return reachable
580 return reachable
581
581
582 def ancestors(self, *revs):
582 def ancestors(self, *revs):
583 'Generate the ancestors of revs using a breadth-first visit'
583 'Generate the ancestors of revs using a breadth-first visit'
584 visit = list(revs)
584 visit = list(revs)
585 seen = set([nullrev])
585 seen = set([nullrev])
586 while visit:
586 while visit:
587 for parent in self.parentrevs(visit.pop(0)):
587 for parent in self.parentrevs(visit.pop(0)):
588 if parent not in seen:
588 if parent not in seen:
589 visit.append(parent)
589 visit.append(parent)
590 seen.add(parent)
590 seen.add(parent)
591 yield parent
591 yield parent
592
592
593 def descendants(self, *revs):
593 def descendants(self, *revs):
594 'Generate the descendants of revs in topological order'
594 'Generate the descendants of revs in topological order'
595 seen = set(revs)
595 seen = set(revs)
596 for i in xrange(min(revs) + 1, len(self)):
596 for i in xrange(min(revs) + 1, len(self)):
597 for x in self.parentrevs(i):
597 for x in self.parentrevs(i):
598 if x != nullrev and x in seen:
598 if x != nullrev and x in seen:
599 seen.add(i)
599 seen.add(i)
600 yield i
600 yield i
601 break
601 break
602
602
    def findmissing(self, common=None, heads=None):
        '''
        returns the topologically sorted list of nodes from the set:
        missing = (ancestors(heads) \ ancestors(common))

        where ancestors() is the set of ancestors from heads, heads included

        if heads is None, the heads of the revlog are used
        if common is None, nullid is assumed to be a common node
        '''
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        # work in revision numbers from here on
        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(*common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        # sorting revision numbers yields a topological order
        missing = list(missing)
        missing.sort()
        return [self.node(r) for r in missing]
641
641
642 def nodesbetween(self, roots=None, heads=None):
642 def nodesbetween(self, roots=None, heads=None):
643 """Return a tuple containing three elements. Elements 1 and 2 contain
643 """Return a tuple containing three elements. Elements 1 and 2 contain
644 a final list bases and heads after all the unreachable ones have been
644 a final list bases and heads after all the unreachable ones have been
645 pruned. Element 0 contains a topologically sorted list of all
645 pruned. Element 0 contains a topologically sorted list of all
646
646
647 nodes that satisfy these constraints:
647 nodes that satisfy these constraints:
648 1. All nodes must be descended from a node in roots (the nodes on
648 1. All nodes must be descended from a node in roots (the nodes on
649 roots are considered descended from themselves).
649 roots are considered descended from themselves).
650 2. All nodes must also be ancestors of a node in heads (the nodes in
650 2. All nodes must also be ancestors of a node in heads (the nodes in
651 heads are considered to be their own ancestors).
651 heads are considered to be their own ancestors).
652
652
653 If roots is unspecified, nullid is assumed as the only root.
653 If roots is unspecified, nullid is assumed as the only root.
654 If heads is unspecified, it is taken to be the output of the
654 If heads is unspecified, it is taken to be the output of the
655 heads method (i.e. a list of all nodes in the repository that
655 heads method (i.e. a list of all nodes in the repository that
656 have no children)."""
656 have no children)."""
657 nonodes = ([], [], [])
657 nonodes = ([], [], [])
658 if roots is not None:
658 if roots is not None:
659 roots = list(roots)
659 roots = list(roots)
660 if not roots:
660 if not roots:
661 return nonodes
661 return nonodes
662 lowestrev = min([self.rev(n) for n in roots])
662 lowestrev = min([self.rev(n) for n in roots])
663 else:
663 else:
664 roots = [nullid] # Everybody's a descendent of nullid
664 roots = [nullid] # Everybody's a descendent of nullid
665 lowestrev = nullrev
665 lowestrev = nullrev
666 if (lowestrev == nullrev) and (heads is None):
666 if (lowestrev == nullrev) and (heads is None):
667 # We want _all_ the nodes!
667 # We want _all_ the nodes!
668 return ([self.node(r) for r in self], [nullid], list(self.heads()))
668 return ([self.node(r) for r in self], [nullid], list(self.heads()))
669 if heads is None:
669 if heads is None:
670 # All nodes are ancestors, so the latest ancestor is the last
670 # All nodes are ancestors, so the latest ancestor is the last
671 # node.
671 # node.
672 highestrev = len(self) - 1
672 highestrev = len(self) - 1
673 # Set ancestors to None to signal that every node is an ancestor.
673 # Set ancestors to None to signal that every node is an ancestor.
674 ancestors = None
674 ancestors = None
675 # Set heads to an empty dictionary for later discovery of heads
675 # Set heads to an empty dictionary for later discovery of heads
676 heads = {}
676 heads = {}
677 else:
677 else:
678 heads = list(heads)
678 heads = list(heads)
679 if not heads:
679 if not heads:
680 return nonodes
680 return nonodes
681 ancestors = set()
681 ancestors = set()
682 # Turn heads into a dictionary so we can remove 'fake' heads.
682 # Turn heads into a dictionary so we can remove 'fake' heads.
683 # Also, later we will be using it to filter out the heads we can't
683 # Also, later we will be using it to filter out the heads we can't
684 # find from roots.
684 # find from roots.
685 heads = dict.fromkeys(heads, 0)
685 heads = dict.fromkeys(heads, 0)
686 # Start at the top and keep marking parents until we're done.
686 # Start at the top and keep marking parents until we're done.
687 nodestotag = set(heads)
687 nodestotag = set(heads)
688 # Remember where the top was so we can use it as a limit later.
688 # Remember where the top was so we can use it as a limit later.
689 highestrev = max([self.rev(n) for n in nodestotag])
689 highestrev = max([self.rev(n) for n in nodestotag])
690 while nodestotag:
690 while nodestotag:
691 # grab a node to tag
691 # grab a node to tag
692 n = nodestotag.pop()
692 n = nodestotag.pop()
693 # Never tag nullid
693 # Never tag nullid
694 if n == nullid:
694 if n == nullid:
695 continue
695 continue
696 # A node's revision number represents its place in a
696 # A node's revision number represents its place in a
697 # topologically sorted list of nodes.
697 # topologically sorted list of nodes.
698 r = self.rev(n)
698 r = self.rev(n)
699 if r >= lowestrev:
699 if r >= lowestrev:
700 if n not in ancestors:
700 if n not in ancestors:
701 # If we are possibly a descendent of one of the roots
701 # If we are possibly a descendent of one of the roots
702 # and we haven't already been marked as an ancestor
702 # and we haven't already been marked as an ancestor
703 ancestors.add(n) # Mark as ancestor
703 ancestors.add(n) # Mark as ancestor
704 # Add non-nullid parents to list of nodes to tag.
704 # Add non-nullid parents to list of nodes to tag.
705 nodestotag.update([p for p in self.parents(n) if
705 nodestotag.update([p for p in self.parents(n) if
706 p != nullid])
706 p != nullid])
707 elif n in heads: # We've seen it before, is it a fake head?
707 elif n in heads: # We've seen it before, is it a fake head?
708 # So it is, real heads should not be the ancestors of
708 # So it is, real heads should not be the ancestors of
709 # any other heads.
709 # any other heads.
710 heads.pop(n)
710 heads.pop(n)
711 if not ancestors:
711 if not ancestors:
712 return nonodes
712 return nonodes
713 # Now that we have our set of ancestors, we want to remove any
713 # Now that we have our set of ancestors, we want to remove any
714 # roots that are not ancestors.
714 # roots that are not ancestors.
715
715
716 # If one of the roots was nullid, everything is included anyway.
716 # If one of the roots was nullid, everything is included anyway.
717 if lowestrev > nullrev:
717 if lowestrev > nullrev:
718 # But, since we weren't, let's recompute the lowest rev to not
718 # But, since we weren't, let's recompute the lowest rev to not
719 # include roots that aren't ancestors.
719 # include roots that aren't ancestors.
720
720
721 # Filter out roots that aren't ancestors of heads
721 # Filter out roots that aren't ancestors of heads
722 roots = [n for n in roots if n in ancestors]
722 roots = [n for n in roots if n in ancestors]
723 # Recompute the lowest revision
723 # Recompute the lowest revision
724 if roots:
724 if roots:
725 lowestrev = min([self.rev(n) for n in roots])
725 lowestrev = min([self.rev(n) for n in roots])
726 else:
726 else:
727 # No more roots? Return empty list
727 # No more roots? Return empty list
728 return nonodes
728 return nonodes
729 else:
729 else:
730 # We are descending from nullid, and don't need to care about
730 # We are descending from nullid, and don't need to care about
731 # any other roots.
731 # any other roots.
732 lowestrev = nullrev
732 lowestrev = nullrev
733 roots = [nullid]
733 roots = [nullid]
734 # Transform our roots list into a set.
734 # Transform our roots list into a set.
735 descendents = set(roots)
735 descendents = set(roots)
736 # Also, keep the original roots so we can filter out roots that aren't
736 # Also, keep the original roots so we can filter out roots that aren't
737 # 'real' roots (i.e. are descended from other roots).
737 # 'real' roots (i.e. are descended from other roots).
738 roots = descendents.copy()
738 roots = descendents.copy()
739 # Our topologically sorted list of output nodes.
739 # Our topologically sorted list of output nodes.
740 orderedout = []
740 orderedout = []
741 # Don't start at nullid since we don't want nullid in our output list,
741 # Don't start at nullid since we don't want nullid in our output list,
742 # and if nullid shows up in descedents, empty parents will look like
742 # and if nullid shows up in descedents, empty parents will look like
743 # they're descendents.
743 # they're descendents.
744 for r in xrange(max(lowestrev, 0), highestrev + 1):
744 for r in xrange(max(lowestrev, 0), highestrev + 1):
745 n = self.node(r)
745 n = self.node(r)
746 isdescendent = False
746 isdescendent = False
747 if lowestrev == nullrev: # Everybody is a descendent of nullid
747 if lowestrev == nullrev: # Everybody is a descendent of nullid
748 isdescendent = True
748 isdescendent = True
749 elif n in descendents:
749 elif n in descendents:
750 # n is already a descendent
750 # n is already a descendent
751 isdescendent = True
751 isdescendent = True
752 # This check only needs to be done here because all the roots
752 # This check only needs to be done here because all the roots
753 # will start being marked is descendents before the loop.
753 # will start being marked is descendents before the loop.
754 if n in roots:
754 if n in roots:
755 # If n was a root, check if it's a 'real' root.
755 # If n was a root, check if it's a 'real' root.
756 p = tuple(self.parents(n))
756 p = tuple(self.parents(n))
757 # If any of its parents are descendents, it's not a root.
757 # If any of its parents are descendents, it's not a root.
758 if (p[0] in descendents) or (p[1] in descendents):
758 if (p[0] in descendents) or (p[1] in descendents):
759 roots.remove(n)
759 roots.remove(n)
760 else:
760 else:
761 p = tuple(self.parents(n))
761 p = tuple(self.parents(n))
762 # A node is a descendent if either of its parents are
762 # A node is a descendent if either of its parents are
763 # descendents. (We seeded the dependents list with the roots
763 # descendents. (We seeded the dependents list with the roots
764 # up there, remember?)
764 # up there, remember?)
765 if (p[0] in descendents) or (p[1] in descendents):
765 if (p[0] in descendents) or (p[1] in descendents):
766 descendents.add(n)
766 descendents.add(n)
767 isdescendent = True
767 isdescendent = True
768 if isdescendent and ((ancestors is None) or (n in ancestors)):
768 if isdescendent and ((ancestors is None) or (n in ancestors)):
769 # Only include nodes that are both descendents and ancestors.
769 # Only include nodes that are both descendents and ancestors.
770 orderedout.append(n)
770 orderedout.append(n)
771 if (ancestors is not None) and (n in heads):
771 if (ancestors is not None) and (n in heads):
772 # We're trying to figure out which heads are reachable
772 # We're trying to figure out which heads are reachable
773 # from roots.
773 # from roots.
774 # Mark this head as having been reached
774 # Mark this head as having been reached
775 heads[n] = 1
775 heads[n] = 1
776 elif ancestors is None:
776 elif ancestors is None:
777 # Otherwise, we're trying to discover the heads.
777 # Otherwise, we're trying to discover the heads.
778 # Assume this is a head because if it isn't, the next step
778 # Assume this is a head because if it isn't, the next step
779 # will eventually remove it.
779 # will eventually remove it.
780 heads[n] = 1
780 heads[n] = 1
781 # But, obviously its parents aren't.
781 # But, obviously its parents aren't.
782 for p in self.parents(n):
782 for p in self.parents(n):
783 heads.pop(p, None)
783 heads.pop(p, None)
784 heads = [n for n in heads.iterkeys() if heads[n] != 0]
784 heads = [n for n in heads.iterkeys() if heads[n] != 0]
785 roots = list(roots)
785 roots = list(roots)
786 assert orderedout
786 assert orderedout
787 assert roots
787 assert roots
788 assert heads
788 assert heads
789 return (orderedout, roots, heads)
789 return (orderedout, roots, heads)
790
790
791 def heads(self, start=None, stop=None):
791 def heads(self, start=None, stop=None):
792 """return the list of all nodes that have no children
792 """return the list of all nodes that have no children
793
793
794 if start is specified, only heads that are descendants of
794 if start is specified, only heads that are descendants of
795 start will be returned
795 start will be returned
796 if stop is specified, it will consider all the revs from stop
796 if stop is specified, it will consider all the revs from stop
797 as if they had no children
797 as if they had no children
798 """
798 """
799 if start is None and stop is None:
799 if start is None and stop is None:
800 count = len(self)
800 count = len(self)
801 if not count:
801 if not count:
802 return [nullid]
802 return [nullid]
803 ishead = [1] * (count + 1)
803 ishead = [1] * (count + 1)
804 index = self.index
804 index = self.index
805 for r in xrange(count):
805 for r in xrange(count):
806 e = index[r]
806 e = index[r]
807 ishead[e[5]] = ishead[e[6]] = 0
807 ishead[e[5]] = ishead[e[6]] = 0
808 return [self.node(r) for r in xrange(count) if ishead[r]]
808 return [self.node(r) for r in xrange(count) if ishead[r]]
809
809
810 if start is None:
810 if start is None:
811 start = nullid
811 start = nullid
812 if stop is None:
812 if stop is None:
813 stop = []
813 stop = []
814 stoprevs = set([self.rev(n) for n in stop])
814 stoprevs = set([self.rev(n) for n in stop])
815 startrev = self.rev(start)
815 startrev = self.rev(start)
816 reachable = set((startrev,))
816 reachable = set((startrev,))
817 heads = set((startrev,))
817 heads = set((startrev,))
818
818
819 parentrevs = self.parentrevs
819 parentrevs = self.parentrevs
820 for r in xrange(startrev + 1, len(self)):
820 for r in xrange(startrev + 1, len(self)):
821 for p in parentrevs(r):
821 for p in parentrevs(r):
822 if p in reachable:
822 if p in reachable:
823 if r not in stoprevs:
823 if r not in stoprevs:
824 reachable.add(r)
824 reachable.add(r)
825 heads.add(r)
825 heads.add(r)
826 if p in heads and p not in stoprevs:
826 if p in heads and p not in stoprevs:
827 heads.remove(p)
827 heads.remove(p)
828
828
829 return [self.node(r) for r in heads]
829 return [self.node(r) for r in heads]
830
830
831 def children(self, node):
831 def children(self, node):
832 """find the children of a given node"""
832 """find the children of a given node"""
833 c = []
833 c = []
834 p = self.rev(node)
834 p = self.rev(node)
835 for r in range(p + 1, len(self)):
835 for r in range(p + 1, len(self)):
836 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
836 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
837 if prevs:
837 if prevs:
838 for pr in prevs:
838 for pr in prevs:
839 if pr == p:
839 if pr == p:
840 c.append(self.node(r))
840 c.append(self.node(r))
841 elif p == nullrev:
841 elif p == nullrev:
842 c.append(self.node(r))
842 c.append(self.node(r))
843 return c
843 return c
844
844
845 def _match(self, id):
845 def _match(self, id):
846 if isinstance(id, (long, int)):
846 if isinstance(id, (long, int)):
847 # rev
847 # rev
848 return self.node(id)
848 return self.node(id)
849 if len(id) == 20:
849 if len(id) == 20:
850 # possibly a binary node
850 # possibly a binary node
851 # odds of a binary node being all hex in ASCII are 1 in 10**25
851 # odds of a binary node being all hex in ASCII are 1 in 10**25
852 try:
852 try:
853 node = id
853 node = id
854 self.rev(node) # quick search the index
854 self.rev(node) # quick search the index
855 return node
855 return node
856 except LookupError:
856 except LookupError:
857 pass # may be partial hex id
857 pass # may be partial hex id
858 try:
858 try:
859 # str(rev)
859 # str(rev)
860 rev = int(id)
860 rev = int(id)
861 if str(rev) != id:
861 if str(rev) != id:
862 raise ValueError
862 raise ValueError
863 if rev < 0:
863 if rev < 0:
864 rev = len(self) + rev
864 rev = len(self) + rev
865 if rev < 0 or rev >= len(self):
865 if rev < 0 or rev >= len(self):
866 raise ValueError
866 raise ValueError
867 return self.node(rev)
867 return self.node(rev)
868 except (ValueError, OverflowError):
868 except (ValueError, OverflowError):
869 pass
869 pass
870 if len(id) == 40:
870 if len(id) == 40:
871 try:
871 try:
872 # a full hex nodeid?
872 # a full hex nodeid?
873 node = bin(id)
873 node = bin(id)
874 self.rev(node)
874 self.rev(node)
875 return node
875 return node
876 except (TypeError, LookupError):
876 except (TypeError, LookupError):
877 pass
877 pass
878
878
879 def _partialmatch(self, id):
879 def _partialmatch(self, id):
880 if len(id) < 40:
880 if len(id) < 40:
881 try:
881 try:
882 # hex(node)[:...]
882 # hex(node)[:...]
883 l = len(id) / 2 # grab an even number of digits
883 l = len(id) / 2 # grab an even number of digits
884 bin_id = bin(id[:l*2])
884 bin_id = bin(id[:l*2])
885 nl = [n for n in self.nodemap if n[:l] == bin_id]
885 nl = [n for n in self.nodemap if n[:l] == bin_id]
886 nl = [n for n in nl if hex(n).startswith(id)]
886 nl = [n for n in nl if hex(n).startswith(id)]
887 if len(nl) > 0:
887 if len(nl) > 0:
888 if len(nl) == 1:
888 if len(nl) == 1:
889 return nl[0]
889 return nl[0]
890 raise LookupError(id, self.indexfile,
890 raise LookupError(id, self.indexfile,
891 _('ambiguous identifier'))
891 _('ambiguous identifier'))
892 return None
892 return None
893 except TypeError:
893 except TypeError:
894 pass
894 pass
895
895
896 def lookup(self, id):
896 def lookup(self, id):
897 """locate a node based on:
897 """locate a node based on:
898 - revision number or str(revision number)
898 - revision number or str(revision number)
899 - nodeid or subset of hex nodeid
899 - nodeid or subset of hex nodeid
900 """
900 """
901 n = self._match(id)
901 n = self._match(id)
902 if n is not None:
902 if n is not None:
903 return n
903 return n
904 n = self._partialmatch(id)
904 n = self._partialmatch(id)
905 if n:
905 if n:
906 return n
906 return n
907
907
908 raise LookupError(id, self.indexfile, _('no match found'))
908 raise LookupError(id, self.indexfile, _('no match found'))
909
909
910 def cmp(self, node, text):
910 def cmp(self, node, text):
911 """compare text with a given file revision"""
911 """compare text with a given file revision"""
912 p1, p2 = self.parents(node)
912 p1, p2 = self.parents(node)
913 return hash(text, p1, p2) != node
913 return hash(text, p1, p2) != node
914
914
915 def _addchunk(self, offset, data):
915 def _addchunk(self, offset, data):
916 o, d = self._chunkcache
916 o, d = self._chunkcache
917 # try to add to existing cache
917 # try to add to existing cache
918 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
918 if o + len(d) == offset and len(d) + len(data) < _prereadsize:
919 self._chunkcache = o, d + data
919 self._chunkcache = o, d + data
920 else:
920 else:
921 self._chunkcache = offset, data
921 self._chunkcache = offset, data
922
922
923 def _loadchunk(self, offset, length, df=None):
923 def _loadchunk(self, offset, length, df=None):
924 if not df:
924 if not df:
925 if self._inline:
925 if self._inline:
926 df = self.opener(self.indexfile)
926 df = self.opener(self.indexfile)
927 else:
927 else:
928 df = self.opener(self.datafile)
928 df = self.opener(self.datafile)
929
929
930 readahead = max(65536, length)
930 readahead = max(65536, length)
931 df.seek(offset)
931 df.seek(offset)
932 d = df.read(readahead)
932 d = df.read(readahead)
933 self._addchunk(offset, d)
933 self._addchunk(offset, d)
934 if readahead > length:
934 if readahead > length:
935 return d[:length]
935 return d[:length]
936 return d
936 return d
937
937
938 def _getchunk(self, offset, length, df=None):
938 def _getchunk(self, offset, length, df=None):
939 o, d = self._chunkcache
939 o, d = self._chunkcache
940 l = len(d)
940 l = len(d)
941
941
942 # is it in the cache?
942 # is it in the cache?
943 cachestart = offset - o
943 cachestart = offset - o
944 cacheend = cachestart + length
944 cacheend = cachestart + length
945 if cachestart >= 0 and cacheend <= l:
945 if cachestart >= 0 and cacheend <= l:
946 if cachestart == 0 and cacheend == l:
946 if cachestart == 0 and cacheend == l:
947 return d # avoid a copy
947 return d # avoid a copy
948 return d[cachestart:cacheend]
948 return d[cachestart:cacheend]
949
949
950 return self._loadchunk(offset, length, df)
950 return self._loadchunk(offset, length, df)
951
951
952 def _prime(self, startrev, endrev, df):
952 def _prime(self, startrev, endrev, df):
953 start = self.start(startrev)
953 start = self.start(startrev)
954 end = self.end(endrev)
954 end = self.end(endrev)
955 if self._inline:
955 if self._inline:
956 start += (startrev + 1) * self._io.size
956 start += (startrev + 1) * self._io.size
957 end += (startrev + 1) * self._io.size
957 end += (startrev + 1) * self._io.size
958 self._loadchunk(start, end - start, df)
958 self._loadchunk(start, end - start, df)
959
959
960 def chunk(self, rev, df=None):
960 def chunk(self, rev, df=None):
961 start, length = self.start(rev), self.length(rev)
961 start, length = self.start(rev), self.length(rev)
962 if self._inline:
962 if self._inline:
963 start += (rev + 1) * self._io.size
963 start += (rev + 1) * self._io.size
964 return decompress(self._getchunk(start, length, df))
964 return decompress(self._getchunk(start, length, df))
965
965
966 def revdiff(self, rev1, rev2):
966 def revdiff(self, rev1, rev2):
967 """return or calculate a delta between two revisions"""
967 """return or calculate a delta between two revisions"""
968 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
968 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
969 return self.chunk(rev2)
969 return self.chunk(rev2)
970
970
971 return mdiff.textdiff(self.revision(self.node(rev1)),
971 return mdiff.textdiff(self.revision(self.node(rev1)),
972 self.revision(self.node(rev2)))
972 self.revision(self.node(rev2)))
973
973
974 def revision(self, node):
974 def revision(self, node):
975 """return an uncompressed revision of a given node"""
975 """return an uncompressed revision of a given node"""
976 if node == nullid:
976 if node == nullid:
977 return ""
977 return ""
978 if self._cache and self._cache[0] == node:
978 if self._cache and self._cache[0] == node:
979 return str(self._cache[2])
979 return str(self._cache[2])
980
980
981 # look up what we need to read
981 # look up what we need to read
982 text = None
982 text = None
983 rev = self.rev(node)
983 rev = self.rev(node)
984 base = self.base(rev)
984 base = self.base(rev)
985
985
986 # check rev flags
986 # check rev flags
987 if self.index[rev][0] & 0xFFFF:
987 if self.index[rev][0] & 0xFFFF:
988 raise RevlogError(_('incompatible revision flag %x') %
988 raise RevlogError(_('incompatible revision flag %x') %
989 (self.index[rev][0] & 0xFFFF))
989 (self.index[rev][0] & 0xFFFF))
990
990
991 df = None
991 df = None
992
992
993 # do we have useful data cached?
993 # do we have useful data cached?
994 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
994 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
995 base = self._cache[1]
995 base = self._cache[1]
996 text = str(self._cache[2])
996 text = str(self._cache[2])
997 self._loadindex(base, rev + 1)
997 self._loadindex(base, rev + 1)
998 if not self._inline and rev > base + 1:
998 if not self._inline and rev > base + 1:
999 df = self.opener(self.datafile)
999 df = self.opener(self.datafile)
1000 self._prime(base, rev, df)
1000 self._prime(base, rev, df)
1001 else:
1001 else:
1002 self._loadindex(base, rev + 1)
1002 self._loadindex(base, rev + 1)
1003 if not self._inline and rev > base:
1003 if not self._inline and rev > base:
1004 df = self.opener(self.datafile)
1004 df = self.opener(self.datafile)
1005 self._prime(base, rev, df)
1005 self._prime(base, rev, df)
1006 text = self.chunk(base, df=df)
1006 text = self.chunk(base, df=df)
1007
1007
1008 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
1008 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
1009 text = mdiff.patches(text, bins)
1009 text = mdiff.patches(text, bins)
1010 p1, p2 = self.parents(node)
1010 p1, p2 = self.parents(node)
1011 if node != hash(text, p1, p2):
1011 if node != hash(text, p1, p2):
1012 raise RevlogError(_("integrity check failed on %s:%d")
1012 raise RevlogError(_("integrity check failed on %s:%d")
1013 % (self.datafile, rev))
1013 % (self.datafile, rev))
1014
1014
1015 self._cache = (node, rev, text)
1015 self._cache = (node, rev, text)
1016 return text
1016 return text
1017
1017
1018 def checkinlinesize(self, tr, fp=None):
1018 def checkinlinesize(self, tr, fp=None):
1019 if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
1019 if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
1020 return
1020 return
1021
1021
1022 trinfo = tr.find(self.indexfile)
1022 trinfo = tr.find(self.indexfile)
1023 if trinfo is None:
1023 if trinfo is None:
1024 raise RevlogError(_("%s not found in the transaction")
1024 raise RevlogError(_("%s not found in the transaction")
1025 % self.indexfile)
1025 % self.indexfile)
1026
1026
1027 trindex = trinfo[2]
1027 trindex = trinfo[2]
1028 dataoff = self.start(trindex)
1028 dataoff = self.start(trindex)
1029
1029
1030 tr.add(self.datafile, dataoff)
1030 tr.add(self.datafile, dataoff)
1031
1031
1032 if fp:
1032 if fp:
1033 fp.flush()
1033 fp.flush()
1034 fp.close()
1034 fp.close()
1035
1035
1036 df = self.opener(self.datafile, 'w')
1036 df = self.opener(self.datafile, 'w')
1037 try:
1037 try:
1038 calc = self._io.size
1038 calc = self._io.size
1039 for r in self:
1039 for r in self:
1040 start = self.start(r) + (r + 1) * calc
1040 start = self.start(r) + (r + 1) * calc
1041 length = self.length(r)
1041 length = self.length(r)
1042 d = self._getchunk(start, length)
1042 d = self._getchunk(start, length)
1043 df.write(d)
1043 df.write(d)
1044 finally:
1044 finally:
1045 df.close()
1045 df.close()
1046
1046
1047 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1047 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1048 self.version &= ~(REVLOGNGINLINEDATA)
1048 self.version &= ~(REVLOGNGINLINEDATA)
1049 self._inline = False
1049 self._inline = False
1050 for i in self:
1050 for i in self:
1051 e = self._io.packentry(self.index[i], self.node, self.version, i)
1051 e = self._io.packentry(self.index[i], self.node, self.version, i)
1052 fp.write(e)
1052 fp.write(e)
1053
1053
1054 # if we don't call rename, the temp file will never replace the
1054 # if we don't call rename, the temp file will never replace the
1055 # real index
1055 # real index
1056 fp.rename()
1056 fp.rename()
1057
1057
1058 tr.replace(self.indexfile, trindex * calc)
1058 tr.replace(self.indexfile, trindex * calc)
1059 self._chunkcache = (0, '')
1059 self._chunkcache = (0, '')
1060
1060
1061 def addrevision(self, text, transaction, link, p1, p2, d=None):
1061 def addrevision(self, text, transaction, link, p1, p2, d=None):
1062 """add a revision to the log
1062 """add a revision to the log
1063
1063
1064 text - the revision data to add
1064 text - the revision data to add
1065 transaction - the transaction object used for rollback
1065 transaction - the transaction object used for rollback
1066 link - the linkrev data to add
1066 link - the linkrev data to add
1067 p1, p2 - the parent nodeids of the revision
1067 p1, p2 - the parent nodeids of the revision
1068 d - an optional precomputed delta
1068 d - an optional precomputed delta
1069 """
1069 """
1070 dfh = None
1070 dfh = None
1071 if not self._inline:
1071 if not self._inline:
1072 dfh = self.opener(self.datafile, "a")
1072 dfh = self.opener(self.datafile, "a")
1073 ifh = self.opener(self.indexfile, "a+")
1073 ifh = self.opener(self.indexfile, "a+")
1074 try:
1074 try:
1075 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1075 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1076 finally:
1076 finally:
1077 if dfh:
1077 if dfh:
1078 dfh.close()
1078 dfh.close()
1079 ifh.close()
1079 ifh.close()
1080
1080
1081 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1081 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1082 node = hash(text, p1, p2)
1082 node = hash(text, p1, p2)
1083 if node in self.nodemap:
1083 if node in self.nodemap:
1084 return node
1084 return node
1085
1085
1086 curr = len(self)
1086 curr = len(self)
1087 prev = curr - 1
1087 prev = curr - 1
1088 base = self.base(prev)
1088 base = self.base(prev)
1089 offset = self.end(prev)
1089 offset = self.end(prev)
1090
1090
1091 if curr:
1091 if curr:
1092 if not d:
1092 if not d:
1093 ptext = self.revision(self.node(prev))
1093 ptext = self.revision(self.node(prev))
1094 d = mdiff.textdiff(ptext, text)
1094 d = mdiff.textdiff(ptext, text)
1095 data = compress(d)
1095 data = compress(d)
1096 l = len(data[1]) + len(data[0])
1096 l = len(data[1]) + len(data[0])
1097 dist = l + offset - self.start(base)
1097 dist = l + offset - self.start(base)
1098
1098
1099 # full versions are inserted when the needed deltas
1099 # full versions are inserted when the needed deltas
1100 # become comparable to the uncompressed text
1100 # become comparable to the uncompressed text
1101 if not curr or dist > len(text) * 2:
1101 if not curr or dist > len(text) * 2:
1102 data = compress(text)
1102 data = compress(text)
1103 l = len(data[1]) + len(data[0])
1103 l = len(data[1]) + len(data[0])
1104 base = curr
1104 base = curr
1105
1105
1106 e = (offset_type(offset, 0), l, len(text),
1106 e = (offset_type(offset, 0), l, len(text),
1107 base, link, self.rev(p1), self.rev(p2), node)
1107 base, link, self.rev(p1), self.rev(p2), node)
1108 self.index.insert(-1, e)
1108 self.index.insert(-1, e)
1109 self.nodemap[node] = curr
1109 self.nodemap[node] = curr
1110
1110
1111 entry = self._io.packentry(e, self.node, self.version, curr)
1111 entry = self._io.packentry(e, self.node, self.version, curr)
1112 if not self._inline:
1112 if not self._inline:
1113 transaction.add(self.datafile, offset)
1113 transaction.add(self.datafile, offset)
1114 transaction.add(self.indexfile, curr * len(entry))
1114 transaction.add(self.indexfile, curr * len(entry))
1115 if data[0]:
1115 if data[0]:
1116 dfh.write(data[0])
1116 dfh.write(data[0])
1117 dfh.write(data[1])
1117 dfh.write(data[1])
1118 dfh.flush()
1118 dfh.flush()
1119 ifh.write(entry)
1119 ifh.write(entry)
1120 else:
1120 else:
1121 offset += curr * self._io.size
1121 offset += curr * self._io.size
1122 transaction.add(self.indexfile, offset, curr)
1122 transaction.add(self.indexfile, offset, curr)
1123 ifh.write(entry)
1123 ifh.write(entry)
1124 ifh.write(data[0])
1124 ifh.write(data[0])
1125 ifh.write(data[1])
1125 ifh.write(data[1])
1126 self.checkinlinesize(transaction, ifh)
1126 self.checkinlinesize(transaction, ifh)
1127
1127
1128 self._cache = (node, curr, text)
1128 self._cache = (node, curr, text)
1129 return node
1129 return node
1130
1130
1131 def ancestor(self, a, b):
1131 def ancestor(self, a, b):
1132 """calculate the least common ancestor of nodes a and b"""
1132 """calculate the least common ancestor of nodes a and b"""
1133
1133
1134 def parents(rev):
1134 def parents(rev):
1135 return [p for p in self.parentrevs(rev) if p != nullrev]
1135 return [p for p in self.parentrevs(rev) if p != nullrev]
1136
1136
1137 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1137 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1138 if c is None:
1138 if c is None:
1139 return nullid
1139 return nullid
1140
1140
1141 return self.node(c)
1141 return self.node(c)
1142
1142
1143 def group(self, nodelist, lookup, infocollect=None):
1143 def group(self, nodelist, lookup, infocollect=None):
1144 """calculate a delta group
1144 """calculate a delta group
1145
1145
1146 Given a list of changeset revs, return a set of deltas and
1146 Given a list of changeset revs, return a set of deltas and
1147 metadata corresponding to nodes. the first delta is
1147 metadata corresponding to nodes. the first delta is
1148 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1148 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
1149 have this parent as it has all history before these
1149 have this parent as it has all history before these
1150 changesets. parent is parent[0]
1150 changesets. parent is parent[0]
1151 """
1151 """
1152
1152
1153 # if we don't have any revisions touched by these changesets, bail
1153 # if we don't have any revisions touched by these changesets, bail
1154 if not nodelist:
1154 if not nodelist:
1155 yield changegroup.closechunk()
1155 yield changegroup.closechunk()
1156 return
1156 return
1157
1157
1158 revs = [self.rev(n) for n in nodelist]
1158 revs = [self.rev(n) for n in nodelist]
1159
1159
1160 # add the parent of the first rev
1160 # add the parent of the first rev
1161 p = self.parentrevs(revs[0])[0]
1161 p = self.parentrevs(revs[0])[0]
1162 revs.insert(0, p)
1162 revs.insert(0, p)
1163
1163
1164 # build deltas
1164 # build deltas
1165 for d in xrange(0, len(revs) - 1):
1165 for d in xrange(len(revs) - 1):
1166 a, b = revs[d], revs[d + 1]
1166 a, b = revs[d], revs[d + 1]
1167 nb = self.node(b)
1167 nb = self.node(b)
1168
1168
1169 if infocollect is not None:
1169 if infocollect is not None:
1170 infocollect(nb)
1170 infocollect(nb)
1171
1171
1172 p = self.parents(nb)
1172 p = self.parents(nb)
1173 meta = nb + p[0] + p[1] + lookup(nb)
1173 meta = nb + p[0] + p[1] + lookup(nb)
1174 if a == -1:
1174 if a == -1:
1175 d = self.revision(nb)
1175 d = self.revision(nb)
1176 meta += mdiff.trivialdiffheader(len(d))
1176 meta += mdiff.trivialdiffheader(len(d))
1177 else:
1177 else:
1178 d = self.revdiff(a, b)
1178 d = self.revdiff(a, b)
1179 yield changegroup.chunkheader(len(meta) + len(d))
1179 yield changegroup.chunkheader(len(meta) + len(d))
1180 yield meta
1180 yield meta
1181 if len(d) > 2**20:
1181 if len(d) > 2**20:
1182 pos = 0
1182 pos = 0
1183 while pos < len(d):
1183 while pos < len(d):
1184 pos2 = pos + 2 ** 18
1184 pos2 = pos + 2 ** 18
1185 yield d[pos:pos2]
1185 yield d[pos:pos2]
1186 pos = pos2
1186 pos = pos2
1187 else:
1187 else:
1188 yield d
1188 yield d
1189
1189
1190 yield changegroup.closechunk()
1190 yield changegroup.closechunk()
1191
1191
1192 def addgroup(self, revs, linkmapper, transaction):
1192 def addgroup(self, revs, linkmapper, transaction):
1193 """
1193 """
1194 add a delta group
1194 add a delta group
1195
1195
1196 given a set of deltas, add them to the revision log. the
1196 given a set of deltas, add them to the revision log. the
1197 first delta is against its parent, which should be in our
1197 first delta is against its parent, which should be in our
1198 log, the rest are against the previous delta.
1198 log, the rest are against the previous delta.
1199 """
1199 """
1200
1200
1201 #track the base of the current delta log
1201 #track the base of the current delta log
1202 r = len(self)
1202 r = len(self)
1203 t = r - 1
1203 t = r - 1
1204 node = None
1204 node = None
1205
1205
1206 base = prev = nullrev
1206 base = prev = nullrev
1207 start = end = textlen = 0
1207 start = end = textlen = 0
1208 if r:
1208 if r:
1209 end = self.end(t)
1209 end = self.end(t)
1210
1210
1211 ifh = self.opener(self.indexfile, "a+")
1211 ifh = self.opener(self.indexfile, "a+")
1212 isize = r * self._io.size
1212 isize = r * self._io.size
1213 if self._inline:
1213 if self._inline:
1214 transaction.add(self.indexfile, end + isize, r)
1214 transaction.add(self.indexfile, end + isize, r)
1215 dfh = None
1215 dfh = None
1216 else:
1216 else:
1217 transaction.add(self.indexfile, isize, r)
1217 transaction.add(self.indexfile, isize, r)
1218 transaction.add(self.datafile, end)
1218 transaction.add(self.datafile, end)
1219 dfh = self.opener(self.datafile, "a")
1219 dfh = self.opener(self.datafile, "a")
1220
1220
1221 try:
1221 try:
1222 # loop through our set of deltas
1222 # loop through our set of deltas
1223 chain = None
1223 chain = None
1224 for chunk in revs:
1224 for chunk in revs:
1225 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1225 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1226 link = linkmapper(cs)
1226 link = linkmapper(cs)
1227 if node in self.nodemap:
1227 if node in self.nodemap:
1228 # this can happen if two branches make the same change
1228 # this can happen if two branches make the same change
1229 chain = node
1229 chain = node
1230 continue
1230 continue
1231 delta = buffer(chunk, 80)
1231 delta = buffer(chunk, 80)
1232 del chunk
1232 del chunk
1233
1233
1234 for p in (p1, p2):
1234 for p in (p1, p2):
1235 if not p in self.nodemap:
1235 if not p in self.nodemap:
1236 raise LookupError(p, self.indexfile, _('unknown parent'))
1236 raise LookupError(p, self.indexfile, _('unknown parent'))
1237
1237
1238 if not chain:
1238 if not chain:
1239 # retrieve the parent revision of the delta chain
1239 # retrieve the parent revision of the delta chain
1240 chain = p1
1240 chain = p1
1241 if not chain in self.nodemap:
1241 if not chain in self.nodemap:
1242 raise LookupError(chain, self.indexfile, _('unknown base'))
1242 raise LookupError(chain, self.indexfile, _('unknown base'))
1243
1243
1244 # full versions are inserted when the needed deltas become
1244 # full versions are inserted when the needed deltas become
1245 # comparable to the uncompressed text or when the previous
1245 # comparable to the uncompressed text or when the previous
1246 # version is not the one we have a delta against. We use
1246 # version is not the one we have a delta against. We use
1247 # the size of the previous full rev as a proxy for the
1247 # the size of the previous full rev as a proxy for the
1248 # current size.
1248 # current size.
1249
1249
1250 if chain == prev:
1250 if chain == prev:
1251 cdelta = compress(delta)
1251 cdelta = compress(delta)
1252 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1252 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1253 textlen = mdiff.patchedsize(textlen, delta)
1253 textlen = mdiff.patchedsize(textlen, delta)
1254
1254
1255 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1255 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1256 # flush our writes here so we can read it in revision
1256 # flush our writes here so we can read it in revision
1257 if dfh:
1257 if dfh:
1258 dfh.flush()
1258 dfh.flush()
1259 ifh.flush()
1259 ifh.flush()
1260 text = self.revision(chain)
1260 text = self.revision(chain)
1261 if len(text) == 0:
1261 if len(text) == 0:
1262 # skip over trivial delta header
1262 # skip over trivial delta header
1263 text = buffer(delta, 12)
1263 text = buffer(delta, 12)
1264 else:
1264 else:
1265 text = mdiff.patches(text, [delta])
1265 text = mdiff.patches(text, [delta])
1266 del delta
1266 del delta
1267 chk = self._addrevision(text, transaction, link, p1, p2, None,
1267 chk = self._addrevision(text, transaction, link, p1, p2, None,
1268 ifh, dfh)
1268 ifh, dfh)
1269 if not dfh and not self._inline:
1269 if not dfh and not self._inline:
1270 # addrevision switched from inline to conventional
1270 # addrevision switched from inline to conventional
1271 # reopen the index
1271 # reopen the index
1272 dfh = self.opener(self.datafile, "a")
1272 dfh = self.opener(self.datafile, "a")
1273 ifh = self.opener(self.indexfile, "a")
1273 ifh = self.opener(self.indexfile, "a")
1274 if chk != node:
1274 if chk != node:
1275 raise RevlogError(_("consistency error adding group"))
1275 raise RevlogError(_("consistency error adding group"))
1276 textlen = len(text)
1276 textlen = len(text)
1277 else:
1277 else:
1278 e = (offset_type(end, 0), cdeltalen, textlen, base,
1278 e = (offset_type(end, 0), cdeltalen, textlen, base,
1279 link, self.rev(p1), self.rev(p2), node)
1279 link, self.rev(p1), self.rev(p2), node)
1280 self.index.insert(-1, e)
1280 self.index.insert(-1, e)
1281 self.nodemap[node] = r
1281 self.nodemap[node] = r
1282 entry = self._io.packentry(e, self.node, self.version, r)
1282 entry = self._io.packentry(e, self.node, self.version, r)
1283 if self._inline:
1283 if self._inline:
1284 ifh.write(entry)
1284 ifh.write(entry)
1285 ifh.write(cdelta[0])
1285 ifh.write(cdelta[0])
1286 ifh.write(cdelta[1])
1286 ifh.write(cdelta[1])
1287 self.checkinlinesize(transaction, ifh)
1287 self.checkinlinesize(transaction, ifh)
1288 if not self._inline:
1288 if not self._inline:
1289 dfh = self.opener(self.datafile, "a")
1289 dfh = self.opener(self.datafile, "a")
1290 ifh = self.opener(self.indexfile, "a")
1290 ifh = self.opener(self.indexfile, "a")
1291 else:
1291 else:
1292 dfh.write(cdelta[0])
1292 dfh.write(cdelta[0])
1293 dfh.write(cdelta[1])
1293 dfh.write(cdelta[1])
1294 ifh.write(entry)
1294 ifh.write(entry)
1295
1295
1296 t, r, chain, prev = r, r + 1, node, node
1296 t, r, chain, prev = r, r + 1, node, node
1297 base = self.base(t)
1297 base = self.base(t)
1298 start = self.start(base)
1298 start = self.start(base)
1299 end = self.end(t)
1299 end = self.end(t)
1300 finally:
1300 finally:
1301 if dfh:
1301 if dfh:
1302 dfh.close()
1302 dfh.close()
1303 ifh.close()
1303 ifh.close()
1304
1304
1305 return node
1305 return node
1306
1306
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll readd them after this truncation.
        """
        # empty revlog: nothing to truncate
        if len(self) == 0:
            return

        # a lazily-loaded index cannot be truncated in place; force a
        # full load of the index and node map first
        if isinstance(self.index, lazyindex):
            self._loadindexmap()

        # find the first revision whose linkrev (index field 4) reaches
        # minlink; the for/else falls through when no revision qualifies
        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            # separate data file: index truncates at a whole number of
            # fixed-size entries
            end = rev * self._io.size
        else:
            # inline revlog: index entries and revision data share one
            # file, so the truncation point covers both
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkcache = (0, '')
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        # drop the stripped entries but keep the final sentinel entry
        # (new entries are inserted before it via index.insert(-1, ...))
        del self.index[rev:-1]
1350
1350
1351 def checksize(self):
1351 def checksize(self):
1352 expected = 0
1352 expected = 0
1353 if len(self):
1353 if len(self):
1354 expected = max(0, self.end(len(self) - 1))
1354 expected = max(0, self.end(len(self) - 1))
1355
1355
1356 try:
1356 try:
1357 f = self.opener(self.datafile)
1357 f = self.opener(self.datafile)
1358 f.seek(0, 2)
1358 f.seek(0, 2)
1359 actual = f.tell()
1359 actual = f.tell()
1360 dd = actual - expected
1360 dd = actual - expected
1361 except IOError, inst:
1361 except IOError, inst:
1362 if inst.errno != errno.ENOENT:
1362 if inst.errno != errno.ENOENT:
1363 raise
1363 raise
1364 dd = 0
1364 dd = 0
1365
1365
1366 try:
1366 try:
1367 f = self.opener(self.indexfile)
1367 f = self.opener(self.indexfile)
1368 f.seek(0, 2)
1368 f.seek(0, 2)
1369 actual = f.tell()
1369 actual = f.tell()
1370 s = self._io.size
1370 s = self._io.size
1371 i = max(0, actual / s)
1371 i = max(0, actual / s)
1372 di = actual - (i * s)
1372 di = actual - (i * s)
1373 if self._inline:
1373 if self._inline:
1374 databytes = 0
1374 databytes = 0
1375 for r in self:
1375 for r in self:
1376 databytes += max(0, self.length(r))
1376 databytes += max(0, self.length(r))
1377 dd = 0
1377 dd = 0
1378 di = actual - len(self) * s - databytes
1378 di = actual - len(self) * s - databytes
1379 except IOError, inst:
1379 except IOError, inst:
1380 if inst.errno != errno.ENOENT:
1380 if inst.errno != errno.ENOENT:
1381 raise
1381 raise
1382 di = 0
1382 di = 0
1383
1383
1384 return (dd, di)
1384 return (dd, di)
1385
1385
1386 def files(self):
1386 def files(self):
1387 res = [ self.indexfile ]
1387 res = [ self.indexfile ]
1388 if not self._inline:
1388 if not self._inline:
1389 res.append(self.datafile)
1389 res.append(self.datafile)
1390 return res
1390 return res
General Comments 0
You need to be logged in to leave comments. Login now