##// END OF EJS Templates
move canonpath from util to scmutil
Adrian Buehlmann -
r13971:bfeaa88b default
parent child Browse files
Show More
@@ -1,342 +1,342 b''
1 # ASCII graph log extension for Mercurial
1 # ASCII graph log extension for Mercurial
2 #
2 #
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to view revision graphs from a shell
8 '''command to view revision graphs from a shell
9
9
10 This extension adds a --graph option to the incoming, outgoing and log
10 This extension adds a --graph option to the incoming, outgoing and log
11 commands. When this options is given, an ASCII representation of the
11 commands. When this options is given, an ASCII representation of the
12 revision graph is also shown.
12 revision graph is also shown.
13 '''
13 '''
14
14
15 import os
15 import os
16 from mercurial.cmdutil import revrange, show_changeset
16 from mercurial.cmdutil import revrange, show_changeset
17 from mercurial.commands import templateopts
17 from mercurial.commands import templateopts
18 from mercurial.i18n import _
18 from mercurial.i18n import _
19 from mercurial.node import nullrev
19 from mercurial.node import nullrev
20 from mercurial import cmdutil, commands, extensions
20 from mercurial import cmdutil, commands, extensions
21 from mercurial import hg, util, graphmod
21 from mercurial import hg, scmutil, util, graphmod
22
22
23 ASCIIDATA = 'ASC'
23 ASCIIDATA = 'ASC'
24
24
25 def asciiedges(seen, rev, parents):
25 def asciiedges(seen, rev, parents):
26 """adds edge info to changelog DAG walk suitable for ascii()"""
26 """adds edge info to changelog DAG walk suitable for ascii()"""
27 if rev not in seen:
27 if rev not in seen:
28 seen.append(rev)
28 seen.append(rev)
29 nodeidx = seen.index(rev)
29 nodeidx = seen.index(rev)
30
30
31 knownparents = []
31 knownparents = []
32 newparents = []
32 newparents = []
33 for parent in parents:
33 for parent in parents:
34 if parent in seen:
34 if parent in seen:
35 knownparents.append(parent)
35 knownparents.append(parent)
36 else:
36 else:
37 newparents.append(parent)
37 newparents.append(parent)
38
38
39 ncols = len(seen)
39 ncols = len(seen)
40 seen[nodeidx:nodeidx + 1] = newparents
40 seen[nodeidx:nodeidx + 1] = newparents
41 edges = [(nodeidx, seen.index(p)) for p in knownparents]
41 edges = [(nodeidx, seen.index(p)) for p in knownparents]
42
42
43 if len(newparents) > 0:
43 if len(newparents) > 0:
44 edges.append((nodeidx, nodeidx))
44 edges.append((nodeidx, nodeidx))
45 if len(newparents) > 1:
45 if len(newparents) > 1:
46 edges.append((nodeidx, nodeidx + 1))
46 edges.append((nodeidx, nodeidx + 1))
47
47
48 nmorecols = len(seen) - ncols
48 nmorecols = len(seen) - ncols
49 return nodeidx, edges, ncols, nmorecols
49 return nodeidx, edges, ncols, nmorecols
50
50
51 def fix_long_right_edges(edges):
51 def fix_long_right_edges(edges):
52 for (i, (start, end)) in enumerate(edges):
52 for (i, (start, end)) in enumerate(edges):
53 if end > start:
53 if end > start:
54 edges[i] = (start, end + 1)
54 edges[i] = (start, end + 1)
55
55
56 def get_nodeline_edges_tail(
56 def get_nodeline_edges_tail(
57 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
57 node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
58 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
58 if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
59 # Still going in the same non-vertical direction.
59 # Still going in the same non-vertical direction.
60 if n_columns_diff == -1:
60 if n_columns_diff == -1:
61 start = max(node_index + 1, p_node_index)
61 start = max(node_index + 1, p_node_index)
62 tail = ["|", " "] * (start - node_index - 1)
62 tail = ["|", " "] * (start - node_index - 1)
63 tail.extend(["/", " "] * (n_columns - start))
63 tail.extend(["/", " "] * (n_columns - start))
64 return tail
64 return tail
65 else:
65 else:
66 return ["\\", " "] * (n_columns - node_index - 1)
66 return ["\\", " "] * (n_columns - node_index - 1)
67 else:
67 else:
68 return ["|", " "] * (n_columns - node_index - 1)
68 return ["|", " "] * (n_columns - node_index - 1)
69
69
70 def draw_edges(edges, nodeline, interline):
70 def draw_edges(edges, nodeline, interline):
71 for (start, end) in edges:
71 for (start, end) in edges:
72 if start == end + 1:
72 if start == end + 1:
73 interline[2 * end + 1] = "/"
73 interline[2 * end + 1] = "/"
74 elif start == end - 1:
74 elif start == end - 1:
75 interline[2 * start + 1] = "\\"
75 interline[2 * start + 1] = "\\"
76 elif start == end:
76 elif start == end:
77 interline[2 * start] = "|"
77 interline[2 * start] = "|"
78 else:
78 else:
79 nodeline[2 * end] = "+"
79 nodeline[2 * end] = "+"
80 if start > end:
80 if start > end:
81 (start, end) = (end, start)
81 (start, end) = (end, start)
82 for i in range(2 * start + 1, 2 * end):
82 for i in range(2 * start + 1, 2 * end):
83 if nodeline[i] != "+":
83 if nodeline[i] != "+":
84 nodeline[i] = "-"
84 nodeline[i] = "-"
85
85
86 def get_padding_line(ni, n_columns, edges):
86 def get_padding_line(ni, n_columns, edges):
87 line = []
87 line = []
88 line.extend(["|", " "] * ni)
88 line.extend(["|", " "] * ni)
89 if (ni, ni - 1) in edges or (ni, ni) in edges:
89 if (ni, ni - 1) in edges or (ni, ni) in edges:
90 # (ni, ni - 1) (ni, ni)
90 # (ni, ni - 1) (ni, ni)
91 # | | | | | | | |
91 # | | | | | | | |
92 # +---o | | o---+
92 # +---o | | o---+
93 # | | c | | c | |
93 # | | c | | c | |
94 # | |/ / | |/ /
94 # | |/ / | |/ /
95 # | | | | | |
95 # | | | | | |
96 c = "|"
96 c = "|"
97 else:
97 else:
98 c = " "
98 c = " "
99 line.extend([c, " "])
99 line.extend([c, " "])
100 line.extend(["|", " "] * (n_columns - ni - 1))
100 line.extend(["|", " "] * (n_columns - ni - 1))
101 return line
101 return line
102
102
103 def asciistate():
103 def asciistate():
104 """returns the initial value for the "state" argument to ascii()"""
104 """returns the initial value for the "state" argument to ascii()"""
105 return [0, 0]
105 return [0, 0]
106
106
107 def ascii(ui, state, type, char, text, coldata):
107 def ascii(ui, state, type, char, text, coldata):
108 """prints an ASCII graph of the DAG
108 """prints an ASCII graph of the DAG
109
109
110 takes the following arguments (one call per node in the graph):
110 takes the following arguments (one call per node in the graph):
111
111
112 - ui to write to
112 - ui to write to
113 - Somewhere to keep the needed state in (init to asciistate())
113 - Somewhere to keep the needed state in (init to asciistate())
114 - Column of the current node in the set of ongoing edges.
114 - Column of the current node in the set of ongoing edges.
115 - Type indicator of node data == ASCIIDATA.
115 - Type indicator of node data == ASCIIDATA.
116 - Payload: (char, lines):
116 - Payload: (char, lines):
117 - Character to use as node's symbol.
117 - Character to use as node's symbol.
118 - List of lines to display as the node's text.
118 - List of lines to display as the node's text.
119 - Edges; a list of (col, next_col) indicating the edges between
119 - Edges; a list of (col, next_col) indicating the edges between
120 the current node and its parents.
120 the current node and its parents.
121 - Number of columns (ongoing edges) in the current revision.
121 - Number of columns (ongoing edges) in the current revision.
122 - The difference between the number of columns (ongoing edges)
122 - The difference between the number of columns (ongoing edges)
123 in the next revision and the number of columns (ongoing edges)
123 in the next revision and the number of columns (ongoing edges)
124 in the current revision. That is: -1 means one column removed;
124 in the current revision. That is: -1 means one column removed;
125 0 means no columns added or removed; 1 means one column added.
125 0 means no columns added or removed; 1 means one column added.
126 """
126 """
127
127
128 idx, edges, ncols, coldiff = coldata
128 idx, edges, ncols, coldiff = coldata
129 assert -2 < coldiff < 2
129 assert -2 < coldiff < 2
130 if coldiff == -1:
130 if coldiff == -1:
131 # Transform
131 # Transform
132 #
132 #
133 # | | | | | |
133 # | | | | | |
134 # o | | into o---+
134 # o | | into o---+
135 # |X / |/ /
135 # |X / |/ /
136 # | | | |
136 # | | | |
137 fix_long_right_edges(edges)
137 fix_long_right_edges(edges)
138
138
139 # add_padding_line says whether to rewrite
139 # add_padding_line says whether to rewrite
140 #
140 #
141 # | | | | | | | |
141 # | | | | | | | |
142 # | o---+ into | o---+
142 # | o---+ into | o---+
143 # | / / | | | # <--- padding line
143 # | / / | | | # <--- padding line
144 # o | | | / /
144 # o | | | / /
145 # o | |
145 # o | |
146 add_padding_line = (len(text) > 2 and coldiff == -1 and
146 add_padding_line = (len(text) > 2 and coldiff == -1 and
147 [x for (x, y) in edges if x + 1 < y])
147 [x for (x, y) in edges if x + 1 < y])
148
148
149 # fix_nodeline_tail says whether to rewrite
149 # fix_nodeline_tail says whether to rewrite
150 #
150 #
151 # | | o | | | | o | |
151 # | | o | | | | o | |
152 # | | |/ / | | |/ /
152 # | | |/ / | | |/ /
153 # | o | | into | o / / # <--- fixed nodeline tail
153 # | o | | into | o / / # <--- fixed nodeline tail
154 # | |/ / | |/ /
154 # | |/ / | |/ /
155 # o | | o | |
155 # o | | o | |
156 fix_nodeline_tail = len(text) <= 2 and not add_padding_line
156 fix_nodeline_tail = len(text) <= 2 and not add_padding_line
157
157
158 # nodeline is the line containing the node character (typically o)
158 # nodeline is the line containing the node character (typically o)
159 nodeline = ["|", " "] * idx
159 nodeline = ["|", " "] * idx
160 nodeline.extend([char, " "])
160 nodeline.extend([char, " "])
161
161
162 nodeline.extend(
162 nodeline.extend(
163 get_nodeline_edges_tail(idx, state[1], ncols, coldiff,
163 get_nodeline_edges_tail(idx, state[1], ncols, coldiff,
164 state[0], fix_nodeline_tail))
164 state[0], fix_nodeline_tail))
165
165
166 # shift_interline is the line containing the non-vertical
166 # shift_interline is the line containing the non-vertical
167 # edges between this entry and the next
167 # edges between this entry and the next
168 shift_interline = ["|", " "] * idx
168 shift_interline = ["|", " "] * idx
169 if coldiff == -1:
169 if coldiff == -1:
170 n_spaces = 1
170 n_spaces = 1
171 edge_ch = "/"
171 edge_ch = "/"
172 elif coldiff == 0:
172 elif coldiff == 0:
173 n_spaces = 2
173 n_spaces = 2
174 edge_ch = "|"
174 edge_ch = "|"
175 else:
175 else:
176 n_spaces = 3
176 n_spaces = 3
177 edge_ch = "\\"
177 edge_ch = "\\"
178 shift_interline.extend(n_spaces * [" "])
178 shift_interline.extend(n_spaces * [" "])
179 shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))
179 shift_interline.extend([edge_ch, " "] * (ncols - idx - 1))
180
180
181 # draw edges from the current node to its parents
181 # draw edges from the current node to its parents
182 draw_edges(edges, nodeline, shift_interline)
182 draw_edges(edges, nodeline, shift_interline)
183
183
184 # lines is the list of all graph lines to print
184 # lines is the list of all graph lines to print
185 lines = [nodeline]
185 lines = [nodeline]
186 if add_padding_line:
186 if add_padding_line:
187 lines.append(get_padding_line(idx, ncols, edges))
187 lines.append(get_padding_line(idx, ncols, edges))
188 lines.append(shift_interline)
188 lines.append(shift_interline)
189
189
190 # make sure that there are as many graph lines as there are
190 # make sure that there are as many graph lines as there are
191 # log strings
191 # log strings
192 while len(text) < len(lines):
192 while len(text) < len(lines):
193 text.append("")
193 text.append("")
194 if len(lines) < len(text):
194 if len(lines) < len(text):
195 extra_interline = ["|", " "] * (ncols + coldiff)
195 extra_interline = ["|", " "] * (ncols + coldiff)
196 while len(lines) < len(text):
196 while len(lines) < len(text):
197 lines.append(extra_interline)
197 lines.append(extra_interline)
198
198
199 # print lines
199 # print lines
200 indentation_level = max(ncols, ncols + coldiff)
200 indentation_level = max(ncols, ncols + coldiff)
201 for (line, logstr) in zip(lines, text):
201 for (line, logstr) in zip(lines, text):
202 ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
202 ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
203 ui.write(ln.rstrip() + '\n')
203 ui.write(ln.rstrip() + '\n')
204
204
205 # ... and start over
205 # ... and start over
206 state[0] = coldiff
206 state[0] = coldiff
207 state[1] = idx
207 state[1] = idx
208
208
209 def get_revs(repo, rev_opt):
209 def get_revs(repo, rev_opt):
210 if rev_opt:
210 if rev_opt:
211 revs = revrange(repo, rev_opt)
211 revs = revrange(repo, rev_opt)
212 if len(revs) == 0:
212 if len(revs) == 0:
213 return (nullrev, nullrev)
213 return (nullrev, nullrev)
214 return (max(revs), min(revs))
214 return (max(revs), min(revs))
215 else:
215 else:
216 return (len(repo) - 1, 0)
216 return (len(repo) - 1, 0)
217
217
218 def check_unsupported_flags(opts):
218 def check_unsupported_flags(opts):
219 for op in ["follow", "follow_first", "date", "copies", "keyword", "remove",
219 for op in ["follow", "follow_first", "date", "copies", "keyword", "remove",
220 "only_merges", "user", "branch", "only_branch", "prune",
220 "only_merges", "user", "branch", "only_branch", "prune",
221 "newest_first", "no_merges", "include", "exclude"]:
221 "newest_first", "no_merges", "include", "exclude"]:
222 if op in opts and opts[op]:
222 if op in opts and opts[op]:
223 raise util.Abort(_("--graph option is incompatible with --%s")
223 raise util.Abort(_("--graph option is incompatible with --%s")
224 % op.replace("_", "-"))
224 % op.replace("_", "-"))
225
225
226 def generate(ui, dag, displayer, showparents, edgefn):
226 def generate(ui, dag, displayer, showparents, edgefn):
227 seen, state = [], asciistate()
227 seen, state = [], asciistate()
228 for rev, type, ctx, parents in dag:
228 for rev, type, ctx, parents in dag:
229 char = ctx.node() in showparents and '@' or 'o'
229 char = ctx.node() in showparents and '@' or 'o'
230 displayer.show(ctx)
230 displayer.show(ctx)
231 lines = displayer.hunk.pop(rev).split('\n')[:-1]
231 lines = displayer.hunk.pop(rev).split('\n')[:-1]
232 displayer.flush(rev)
232 displayer.flush(rev)
233 ascii(ui, state, type, char, lines, edgefn(seen, rev, parents))
233 ascii(ui, state, type, char, lines, edgefn(seen, rev, parents))
234 displayer.close()
234 displayer.close()
235
235
236 def graphlog(ui, repo, path=None, **opts):
236 def graphlog(ui, repo, path=None, **opts):
237 """show revision history alongside an ASCII revision graph
237 """show revision history alongside an ASCII revision graph
238
238
239 Print a revision history alongside a revision graph drawn with
239 Print a revision history alongside a revision graph drawn with
240 ASCII characters.
240 ASCII characters.
241
241
242 Nodes printed as an @ character are parents of the working
242 Nodes printed as an @ character are parents of the working
243 directory.
243 directory.
244 """
244 """
245
245
246 check_unsupported_flags(opts)
246 check_unsupported_flags(opts)
247 limit = cmdutil.loglimit(opts)
247 limit = cmdutil.loglimit(opts)
248 start, stop = get_revs(repo, opts["rev"])
248 start, stop = get_revs(repo, opts["rev"])
249 if start == nullrev:
249 if start == nullrev:
250 return
250 return
251
251
252 if path:
252 if path:
253 path = util.canonpath(repo.root, os.getcwd(), path)
253 path = scmutil.canonpath(repo.root, os.getcwd(), path)
254 if path: # could be reset in canonpath
254 if path: # could be reset in canonpath
255 revdag = graphmod.filerevs(repo, path, start, stop, limit)
255 revdag = graphmod.filerevs(repo, path, start, stop, limit)
256 else:
256 else:
257 if limit is not None:
257 if limit is not None:
258 stop = max(stop, start - limit + 1)
258 stop = max(stop, start - limit + 1)
259 revdag = graphmod.revisions(repo, start, stop)
259 revdag = graphmod.revisions(repo, start, stop)
260
260
261 displayer = show_changeset(ui, repo, opts, buffered=True)
261 displayer = show_changeset(ui, repo, opts, buffered=True)
262 showparents = [ctx.node() for ctx in repo[None].parents()]
262 showparents = [ctx.node() for ctx in repo[None].parents()]
263 generate(ui, revdag, displayer, showparents, asciiedges)
263 generate(ui, revdag, displayer, showparents, asciiedges)
264
264
265 def graphrevs(repo, nodes, opts):
265 def graphrevs(repo, nodes, opts):
266 limit = cmdutil.loglimit(opts)
266 limit = cmdutil.loglimit(opts)
267 nodes.reverse()
267 nodes.reverse()
268 if limit is not None:
268 if limit is not None:
269 nodes = nodes[:limit]
269 nodes = nodes[:limit]
270 return graphmod.nodes(repo, nodes)
270 return graphmod.nodes(repo, nodes)
271
271
272 def goutgoing(ui, repo, dest=None, **opts):
272 def goutgoing(ui, repo, dest=None, **opts):
273 """show the outgoing changesets alongside an ASCII revision graph
273 """show the outgoing changesets alongside an ASCII revision graph
274
274
275 Print the outgoing changesets alongside a revision graph drawn with
275 Print the outgoing changesets alongside a revision graph drawn with
276 ASCII characters.
276 ASCII characters.
277
277
278 Nodes printed as an @ character are parents of the working
278 Nodes printed as an @ character are parents of the working
279 directory.
279 directory.
280 """
280 """
281
281
282 check_unsupported_flags(opts)
282 check_unsupported_flags(opts)
283 o = hg._outgoing(ui, repo, dest, opts)
283 o = hg._outgoing(ui, repo, dest, opts)
284 if o is None:
284 if o is None:
285 return
285 return
286
286
287 revdag = graphrevs(repo, o, opts)
287 revdag = graphrevs(repo, o, opts)
288 displayer = show_changeset(ui, repo, opts, buffered=True)
288 displayer = show_changeset(ui, repo, opts, buffered=True)
289 showparents = [ctx.node() for ctx in repo[None].parents()]
289 showparents = [ctx.node() for ctx in repo[None].parents()]
290 generate(ui, revdag, displayer, showparents, asciiedges)
290 generate(ui, revdag, displayer, showparents, asciiedges)
291
291
292 def gincoming(ui, repo, source="default", **opts):
292 def gincoming(ui, repo, source="default", **opts):
293 """show the incoming changesets alongside an ASCII revision graph
293 """show the incoming changesets alongside an ASCII revision graph
294
294
295 Print the incoming changesets alongside a revision graph drawn with
295 Print the incoming changesets alongside a revision graph drawn with
296 ASCII characters.
296 ASCII characters.
297
297
298 Nodes printed as an @ character are parents of the working
298 Nodes printed as an @ character are parents of the working
299 directory.
299 directory.
300 """
300 """
301 def subreporecurse():
301 def subreporecurse():
302 return 1
302 return 1
303
303
304 check_unsupported_flags(opts)
304 check_unsupported_flags(opts)
305 def display(other, chlist, displayer):
305 def display(other, chlist, displayer):
306 revdag = graphrevs(other, chlist, opts)
306 revdag = graphrevs(other, chlist, opts)
307 showparents = [ctx.node() for ctx in repo[None].parents()]
307 showparents = [ctx.node() for ctx in repo[None].parents()]
308 generate(ui, revdag, displayer, showparents, asciiedges)
308 generate(ui, revdag, displayer, showparents, asciiedges)
309
309
310 hg._incoming(display, subreporecurse, ui, repo, source, opts, buffered=True)
310 hg._incoming(display, subreporecurse, ui, repo, source, opts, buffered=True)
311
311
312 def uisetup(ui):
312 def uisetup(ui):
313 '''Initialize the extension.'''
313 '''Initialize the extension.'''
314 _wrapcmd(ui, 'log', commands.table, graphlog)
314 _wrapcmd(ui, 'log', commands.table, graphlog)
315 _wrapcmd(ui, 'incoming', commands.table, gincoming)
315 _wrapcmd(ui, 'incoming', commands.table, gincoming)
316 _wrapcmd(ui, 'outgoing', commands.table, goutgoing)
316 _wrapcmd(ui, 'outgoing', commands.table, goutgoing)
317
317
318 def _wrapcmd(ui, cmd, table, wrapfn):
318 def _wrapcmd(ui, cmd, table, wrapfn):
319 '''wrap the command'''
319 '''wrap the command'''
320 def graph(orig, *args, **kwargs):
320 def graph(orig, *args, **kwargs):
321 if kwargs['graph']:
321 if kwargs['graph']:
322 try:
322 try:
323 return wrapfn(*args, **kwargs)
323 return wrapfn(*args, **kwargs)
324 except TypeError, e:
324 except TypeError, e:
325 if len(args) > wrapfn.func_code.co_argcount:
325 if len(args) > wrapfn.func_code.co_argcount:
326 raise util.Abort(_('--graph option allows at most one file'))
326 raise util.Abort(_('--graph option allows at most one file'))
327 raise
327 raise
328 return orig(*args, **kwargs)
328 return orig(*args, **kwargs)
329 entry = extensions.wrapcommand(table, cmd, graph)
329 entry = extensions.wrapcommand(table, cmd, graph)
330 entry[1].append(('G', 'graph', None, _("show the revision DAG")))
330 entry[1].append(('G', 'graph', None, _("show the revision DAG")))
331
331
332 cmdtable = {
332 cmdtable = {
333 "glog":
333 "glog":
334 (graphlog,
334 (graphlog,
335 [('l', 'limit', '',
335 [('l', 'limit', '',
336 _('limit number of changes displayed'), _('NUM')),
336 _('limit number of changes displayed'), _('NUM')),
337 ('p', 'patch', False, _('show patch')),
337 ('p', 'patch', False, _('show patch')),
338 ('r', 'rev', [],
338 ('r', 'rev', [],
339 _('show the specified revision or range'), _('REV')),
339 _('show the specified revision or range'), _('REV')),
340 ] + templateopts,
340 ] + templateopts,
341 _('hg glog [OPTION]... [FILE]')),
341 _('hg glog [OPTION]... [FILE]')),
342 }
342 }
@@ -1,694 +1,695 b''
1 # keyword.py - $Keyword$ expansion for Mercurial
1 # keyword.py - $Keyword$ expansion for Mercurial
2 #
2 #
3 # Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net>
3 # Copyright 2007-2010 Christian Ebert <blacktrash@gmx.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 #
7 #
8 # $Id$
8 # $Id$
9 #
9 #
10 # Keyword expansion hack against the grain of a DSCM
10 # Keyword expansion hack against the grain of a DSCM
11 #
11 #
12 # There are many good reasons why this is not needed in a distributed
12 # There are many good reasons why this is not needed in a distributed
13 # SCM, still it may be useful in very small projects based on single
13 # SCM, still it may be useful in very small projects based on single
14 # files (like LaTeX packages), that are mostly addressed to an
14 # files (like LaTeX packages), that are mostly addressed to an
15 # audience not running a version control system.
15 # audience not running a version control system.
16 #
16 #
17 # For in-depth discussion refer to
17 # For in-depth discussion refer to
18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
19 #
19 #
20 # Keyword expansion is based on Mercurial's changeset template mappings.
20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 #
21 #
22 # Binary files are not touched.
22 # Binary files are not touched.
23 #
23 #
24 # Files to act upon/ignore are specified in the [keyword] section.
24 # Files to act upon/ignore are specified in the [keyword] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
26 #
26 #
27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28
28
29 '''expand keywords in tracked files
29 '''expand keywords in tracked files
30
30
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 tracked text files selected by your configuration.
32 tracked text files selected by your configuration.
33
33
34 Keywords are only expanded in local repositories and not stored in the
34 Keywords are only expanded in local repositories and not stored in the
35 change history. The mechanism can be regarded as a convenience for the
35 change history. The mechanism can be regarded as a convenience for the
36 current user or for archive distribution.
36 current user or for archive distribution.
37
37
38 Keywords expand to the changeset data pertaining to the latest change
38 Keywords expand to the changeset data pertaining to the latest change
39 relative to the working directory parent of each file.
39 relative to the working directory parent of each file.
40
40
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 sections of hgrc files.
42 sections of hgrc files.
43
43
44 Example::
44 Example::
45
45
46 [keyword]
46 [keyword]
47 # expand keywords in every python file except those matching "x*"
47 # expand keywords in every python file except those matching "x*"
48 **.py =
48 **.py =
49 x* = ignore
49 x* = ignore
50
50
51 [keywordset]
51 [keywordset]
52 # prefer svn- over cvs-like default keywordmaps
52 # prefer svn- over cvs-like default keywordmaps
53 svn = True
53 svn = True
54
54
55 .. note::
55 .. note::
56 The more specific you are in your filename patterns the less you
56 The more specific you are in your filename patterns the less you
57 lose speed in huge repositories.
57 lose speed in huge repositories.
58
58
59 For [keywordmaps] template mapping and expansion demonstration and
59 For [keywordmaps] template mapping and expansion demonstration and
60 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
60 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
61 available templates and filters.
61 available templates and filters.
62
62
63 Three additional date template filters are provided:
63 Three additional date template filters are provided:
64
64
65 :``utcdate``: "2006/09/18 15:13:13"
65 :``utcdate``: "2006/09/18 15:13:13"
66 :``svnutcdate``: "2006-09-18 15:13:13Z"
66 :``svnutcdate``: "2006-09-18 15:13:13Z"
67 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
67 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
68
68
69 The default template mappings (view with :hg:`kwdemo -d`) can be
69 The default template mappings (view with :hg:`kwdemo -d`) can be
70 replaced with customized keywords and templates. Again, run
70 replaced with customized keywords and templates. Again, run
71 :hg:`kwdemo` to control the results of your configuration changes.
71 :hg:`kwdemo` to control the results of your configuration changes.
72
72
73 Before changing/disabling active keywords, you must run :hg:`kwshrink`
73 Before changing/disabling active keywords, you must run :hg:`kwshrink`
74 to avoid storing expanded keywords in the change history.
74 to avoid storing expanded keywords in the change history.
75
75
76 To force expansion after enabling it, or a configuration change, run
76 To force expansion after enabling it, or a configuration change, run
77 :hg:`kwexpand`.
77 :hg:`kwexpand`.
78
78
79 Expansions spanning more than one line and incremental expansions,
79 Expansions spanning more than one line and incremental expansions,
80 like CVS' $Log$, are not supported. A keyword template map "Log =
80 like CVS' $Log$, are not supported. A keyword template map "Log =
81 {desc}" expands to the first line of the changeset description.
81 {desc}" expands to the first line of the changeset description.
82 '''
82 '''
83
83
84 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
84 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
85 from mercurial import localrepo, match, patch, templatefilters, templater, util
85 from mercurial import localrepo, match, patch, templatefilters, templater, util
86 from mercurial import scmutil
86 from mercurial.hgweb import webcommands
87 from mercurial.hgweb import webcommands
87 from mercurial.i18n import _
88 from mercurial.i18n import _
88 import os, re, shutil, tempfile
89 import os, re, shutil, tempfile
89
90
90 commands.optionalrepo += ' kwdemo'
91 commands.optionalrepo += ' kwdemo'
91
92
92 # hg commands that do not act on keywords
93 # hg commands that do not act on keywords
93 nokwcommands = ('add addremove annotate bundle export grep incoming init log'
94 nokwcommands = ('add addremove annotate bundle export grep incoming init log'
94 ' outgoing push tip verify convert email glog')
95 ' outgoing push tip verify convert email glog')
95
96
96 # hg commands that trigger expansion only when writing to working dir,
97 # hg commands that trigger expansion only when writing to working dir,
97 # not when reading filelog, and unexpand when reading from working dir
98 # not when reading filelog, and unexpand when reading from working dir
98 restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'
99 restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'
99
100
100 # names of extensions using dorecord
101 # names of extensions using dorecord
101 recordextensions = 'record'
102 recordextensions = 'record'
102
103
103 colortable = {
104 colortable = {
104 'kwfiles.enabled': 'green bold',
105 'kwfiles.enabled': 'green bold',
105 'kwfiles.deleted': 'cyan bold underline',
106 'kwfiles.deleted': 'cyan bold underline',
106 'kwfiles.enabledunknown': 'green',
107 'kwfiles.enabledunknown': 'green',
107 'kwfiles.ignored': 'bold',
108 'kwfiles.ignored': 'bold',
108 'kwfiles.ignoredunknown': 'none'
109 'kwfiles.ignoredunknown': 'none'
109 }
110 }
110
111
111 # date like in cvs' $Date
112 # date like in cvs' $Date
112 def utcdate(text):
113 def utcdate(text):
113 ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
114 ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
114 '''
115 '''
115 return util.datestr((text[0], 0), '%Y/%m/%d %H:%M:%S')
116 return util.datestr((text[0], 0), '%Y/%m/%d %H:%M:%S')
116 # date like in svn's $Date
117 # date like in svn's $Date
117 def svnisodate(text):
118 def svnisodate(text):
118 ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
119 ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
119 +0200 (Tue, 18 Aug 2009)".
120 +0200 (Tue, 18 Aug 2009)".
120 '''
121 '''
121 return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
122 return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
122 # date like in svn's $Id
123 # date like in svn's $Id
123 def svnutcdate(text):
124 def svnutcdate(text):
124 ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
125 ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
125 11:00:13Z".
126 11:00:13Z".
126 '''
127 '''
127 return util.datestr((text[0], 0), '%Y-%m-%d %H:%M:%SZ')
128 return util.datestr((text[0], 0), '%Y-%m-%d %H:%M:%SZ')
128
129
129 templatefilters.filters.update({'utcdate': utcdate,
130 templatefilters.filters.update({'utcdate': utcdate,
130 'svnisodate': svnisodate,
131 'svnisodate': svnisodate,
131 'svnutcdate': svnutcdate})
132 'svnutcdate': svnutcdate})
132
133
133 # make keyword tools accessible
134 # make keyword tools accessible
134 kwtools = {'templater': None, 'hgcmd': ''}
135 kwtools = {'templater': None, 'hgcmd': ''}
135
136
136 def _defaultkwmaps(ui):
137 def _defaultkwmaps(ui):
137 '''Returns default keywordmaps according to keywordset configuration.'''
138 '''Returns default keywordmaps according to keywordset configuration.'''
138 templates = {
139 templates = {
139 'Revision': '{node|short}',
140 'Revision': '{node|short}',
140 'Author': '{author|user}',
141 'Author': '{author|user}',
141 }
142 }
142 kwsets = ({
143 kwsets = ({
143 'Date': '{date|utcdate}',
144 'Date': '{date|utcdate}',
144 'RCSfile': '{file|basename},v',
145 'RCSfile': '{file|basename},v',
145 'RCSFile': '{file|basename},v', # kept for backwards compatibility
146 'RCSFile': '{file|basename},v', # kept for backwards compatibility
146 # with hg-keyword
147 # with hg-keyword
147 'Source': '{root}/{file},v',
148 'Source': '{root}/{file},v',
148 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
149 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
149 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
150 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
150 }, {
151 }, {
151 'Date': '{date|svnisodate}',
152 'Date': '{date|svnisodate}',
152 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
153 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
153 'LastChangedRevision': '{node|short}',
154 'LastChangedRevision': '{node|short}',
154 'LastChangedBy': '{author|user}',
155 'LastChangedBy': '{author|user}',
155 'LastChangedDate': '{date|svnisodate}',
156 'LastChangedDate': '{date|svnisodate}',
156 })
157 })
157 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
158 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
158 return templates
159 return templates
159
160
160 def _shrinktext(text, subfunc):
161 def _shrinktext(text, subfunc):
161 '''Helper for keyword expansion removal in text.
162 '''Helper for keyword expansion removal in text.
162 Depending on subfunc also returns number of substitutions.'''
163 Depending on subfunc also returns number of substitutions.'''
163 return subfunc(r'$\1$', text)
164 return subfunc(r'$\1$', text)
164
165
165 def _preselect(wstatus, changed):
166 def _preselect(wstatus, changed):
166 '''Retrieves modfied and added files from a working directory state
167 '''Retrieves modfied and added files from a working directory state
167 and returns the subset of each contained in given changed files
168 and returns the subset of each contained in given changed files
168 retrieved from a change context.'''
169 retrieved from a change context.'''
169 modified, added = wstatus[:2]
170 modified, added = wstatus[:2]
170 modified = [f for f in modified if f in changed]
171 modified = [f for f in modified if f in changed]
171 added = [f for f in added if f in changed]
172 added = [f for f in added if f in changed]
172 return modified, added
173 return modified, added
173
174
174
175
class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''

    def __init__(self, ui, repo, inc, exc):
        self.ui = ui
        self.repo = repo
        # matcher selecting files subject to expansion (inc minus exc)
        self.match = match.match(repo.root, '', [], inc, exc)
        # restricted commands (e.g. diff/record) must see shrunk keywords
        self.restrict = kwtools['hgcmd'] in restricted.split()
        self.record = False

        kwmaps = self.ui.configitems('keywordmaps')
        if kwmaps: # override default templates
            self.templates = dict((k, templater.parsestring(v, False))
                                  for k, v in kwmaps)
        else:
            self.templates = _defaultkwmaps(self.ui)

    @util.propertycache
    def escape(self):
        '''Returns bar-separated and escaped keywords.'''
        return '|'.join(map(re.escape, self.templates.keys()))

    @util.propertycache
    def rekw(self):
        '''Returns regex for unexpanded keywords.'''
        return re.compile(r'\$(%s)\$' % self.escape)

    @util.propertycache
    def rekwexp(self):
        '''Returns regex for expanded keywords.'''
        return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)

    def substitute(self, data, path, ctx, subfunc):
        '''Replaces keywords in data with expanded template.'''
        def kwsub(mobj):
            kw = mobj.group(1)
            ct = cmdutil.changeset_templater(self.ui, self.repo,
                                             False, None, '', False)
            ct.use_template(self.templates[kw])
            self.ui.pushbuffer()
            ct.show(ctx, root=self.repo.root, file=path)
            # keep the expansion on a single line
            ekw = templatefilters.firstline(self.ui.popbuffer())
            return '$%s: %s $' % (kw, ekw)
        return subfunc(kwsub, data)

    def linkctx(self, path, fileid):
        '''Similar to filelog.linkrev, but returns a changectx.'''
        return self.repo.filectx(path, fileid=fileid).changectx()

    def expand(self, path, node, data):
        '''Returns data with keywords expanded.'''
        # never expand for restricted commands or binary contents
        if not self.restrict and self.match(path) and not util.binary(data):
            ctx = self.linkctx(path, node)
            return self.substitute(data, path, ctx, self.rekw.sub)
        return data

    def iskwfile(self, cand, ctx):
        '''Returns subset of candidates which are configured for keyword
        expansion and are not symbolic links.'''
        # symlink "contents" are the link target and must not be rewritten
        return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]

    def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
        '''Overwrites selected files expanding/shrinking keywords.'''
        if self.restrict or lookup or self.record: # exclude kw_copy
            candidates = self.iskwfile(candidates, ctx)
        if not candidates:
            return
        kwcmd = self.restrict and lookup # kwexpand/kwshrink
        if self.restrict or expand and lookup:
            mf = ctx.manifest()
        lctx = ctx
        # restricted/rekw: operate on unexpanded keyword form
        re_kw = (self.restrict or rekw) and self.rekw or self.rekwexp
        msg = (expand and _('overwriting %s expanding keywords\n')
               or _('overwriting %s shrinking keywords\n'))
        for f in candidates:
            if self.restrict:
                data = self.repo.file(f).read(mf[f])
            else:
                data = self.repo.wread(f)
            if util.binary(data):
                continue
            if expand:
                if lookup:
                    lctx = self.linkctx(f, mf[f])
                data, found = self.substitute(data, f, lctx, re_kw.subn)
            elif self.restrict:
                found = re_kw.search(data)
            else:
                data, found = _shrinktext(data, re_kw.subn)
            if found:
                self.ui.note(msg % f)
                self.repo.wwrite(f, data, ctx.flags(f))
                if kwcmd:
                    self.repo.dirstate.normal(f)
                elif self.record:
                    self.repo.dirstate.normallookup(f)

    def shrink(self, fname, text):
        '''Returns text with all keyword substitutions removed.'''
        if self.match(fname) and not util.binary(text):
            return _shrinktext(text, self.rekwexp.sub)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = ''.join(lines)
            if not util.binary(text):
                return _shrinktext(text, self.rekwexp.sub).splitlines(True)
        return lines

    def wread(self, fname, data):
        '''If in restricted mode returns data read from wdir with
        keyword substitutions removed.'''
        return self.restrict and self.shrink(fname, data) or data
293
294
class kwfilelog(filelog.filelog):
    '''
    Subclass of filelog to hook into its read, add, cmp methods.
    Keywords are "stored" unexpanded, and processed on reading.
    '''
    def __init__(self, opener, kwt, path):
        super(kwfilelog, self).__init__(opener, path)
        self.kwt = kwt
        self.path = path

    def read(self, node):
        '''Expands keywords when reading filelog.'''
        raw = super(kwfilelog, self).read(node)
        if self.renamed(node):
            # rename metadata revisions pass through untouched
            return raw
        return self.kwt.expand(self.path, node, raw)

    def add(self, text, meta, tr, link, p1=None, p2=None):
        '''Removes keyword substitutions when adding to filelog.'''
        shrunk = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).add(shrunk, meta, tr, link, p1, p2)

    def cmp(self, node, text):
        '''Removes keyword substitutions for comparison.'''
        shrunk = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).cmp(node, shrunk)
320
321
def _status(ui, repo, kwt, *pats, **opts):
    '''Bails out if [keyword] configuration is not active.
    Returns status of working directory.'''
    if not kwt:
        # no templater means no [keyword] pattern ever matched
        if ui.configitems('keyword'):
            raise util.Abort(_('[keyword] patterns cannot match'))
        raise util.Abort(_('no [keyword] patterns configured'))
    showunknown = opts.get('unknown') or opts.get('all')
    return repo.status(match=cmdutil.match(repo, pats, opts), clean=True,
                       unknown=showunknown)
330
331
def _kwfwrite(ui, repo, expand, *pats, **opts):
    '''Selects files and passes them to kwtemplater.overwrite.'''
    wctx = repo[None]
    if len(wctx.parents()) > 1:
        raise util.Abort(_('outstanding uncommitted merge'))
    kwt = kwtools['templater']
    wlock = repo.wlock()
    try:
        wstatus = _status(ui, repo, kwt, *pats, **opts)
        # any modified, added, removed or deleted file blocks the rewrite
        if any(wstatus[:4]):
            raise util.Abort(_('outstanding uncommitted changes'))
        clean = wstatus[6]
        kwt.overwrite(wctx, clean, True, expand)
    finally:
        wlock.release()
346
347
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        # emit one hgrc-style section with sorted "key = value" lines
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    fn = 'demo.txt'
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    # rebind 'repo' to a throwaway repository so the demo commit below
    # cannot touch the user's real repository
    repo = localrepo.localrepository(ui, tmpdir, True)
    ui.setconfig('keyword', fn, '')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn)

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        # custom maps: merge command-line maps and/or an external rcfile
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            fp = repo.opener('hgrc', 'w')
            fp.writelines(rcmaps)
            fp.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        # -d/--default: ignore current configuration entirely
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            # overwrite configured maps with the defaults for this run
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v)
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        kwmaps = dict(uikwmaps) or _defaultkwmaps(ui)

    # activate the extension on the demo repository
    uisetup(ui)
    reposetup(ui, repo)
    ui.write('[extensions]\nkeyword =\n')
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    # write one unexpanded '$Keyword$' line per configured keyword
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wopener(fn, 'w').write(keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    repo.dirstate.setbranch('demobranch')
    # silence any configured commit hooks for the demo commit
    for name, cmd in ui.configitems('hooks'):
        if name.split('.', 1)[0].find('commit') > -1:
            repo.ui.setconfig('hooks', name, '')
    msg = _('hg keyword configuration and expansion example')
    ui.note("hg ci -m '%s'\n" % msg)
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    shutil.rmtree(tmpdir, ignore_errors=True)
429
430
def expand(ui, repo, *pats, **opts):
    '''expand keywords in the working directory

    Run after (re)enabling keyword expansion.

    kwexpand refuses to run if given files contain local changes.
    '''
    # third positional argument (expand=True) selects expansion
    _kwfwrite(ui, repo, True, *pats, **opts)
439
440
def files(ui, repo, *pats, **opts):
    '''show files configured for keyword expansion

    List which files in the working directory are matched by the
    [keyword] configuration patterns.

    Useful to prevent inadvertent keyword expansion and to speed up
    execution by including only files that are actual candidates for
    expansion.

    See :hg:`help keyword` on how to construct patterns both for
    inclusion and exclusion of files.

    With -A/--all and -v/--verbose the codes used to show the status
    of files are::

      K = keyword expansion candidate
      k = keyword expansion candidate (not tracked)
      I = ignored
      i = ignored (not tracked)
    '''
    kwt = kwtools['templater']
    wstatus = _status(ui, repo, kwt, *pats, **opts)
    cwd = pats and repo.getcwd() or ''
    modified, added, removed, deleted, unknown, ignored, clean = wstatus
    # tracked candidates, unless only untracked files were requested
    if opts.get('unknown') and not opts.get('all'):
        tracked = []
    else:
        tracked = sorted(modified + added + clean)
    wctx = repo[None]
    kwfiles = kwt.iskwfile(tracked, wctx)
    kwdeleted = kwt.iskwfile(deleted, wctx)
    kwunknown = kwt.iskwfile(unknown, wctx)
    if opts.get('ignore') and not opts.get('all'):
        showfiles = [], [], []
    else:
        showfiles = kwfiles, kwdeleted, kwunknown
    if opts.get('all') or opts.get('ignore'):
        # append the non-candidates (ignored) columns
        showfiles += ([f for f in tracked if f not in kwfiles],
                      [f for f in unknown if f not in kwunknown])
    kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
    for char, filenames, kwstate in zip('K!kIi', showfiles, kwlabels):
        if opts.get('all') or ui.verbose:
            fmt = '%s %%s\n' % char
        else:
            fmt = '%s\n'
        for f in filenames:
            ui.write(fmt % repo.pathto(f, cwd), label='kwfiles.' + kwstate)
485
486
def shrink(ui, repo, *pats, **opts):
    '''revert expanded keywords in the working directory

    Must be run before changing/disabling active keywords.

    kwshrink refuses to run if given files contain local changes.
    '''
    # third positional argument (expand=False) selects shrinking
    _kwfwrite(ui, repo, False, *pats, **opts)
495
496
496
497
def uisetup(ui):
    '''Monkeypatches dispatch._parse to retrieve user command.'''

    def kwdispatch_parse(orig, ui, args):
        '''Wrapper around dispatch._parse recording the running hg command.'''
        parsed = orig(ui, args)
        # first element of the parsed result is the command name
        kwtools['hgcmd'] = parsed[0]
        return parsed

    extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
507
508
508 def reposetup(ui, repo):
509 def reposetup(ui, repo):
509 '''Sets up repo as kwrepo for keyword substitution.
510 '''Sets up repo as kwrepo for keyword substitution.
510 Overrides file method to return kwfilelog instead of filelog
511 Overrides file method to return kwfilelog instead of filelog
511 if file matches user configuration.
512 if file matches user configuration.
512 Wraps commit to overwrite configured files with updated
513 Wraps commit to overwrite configured files with updated
513 keyword substitutions.
514 keyword substitutions.
514 Monkeypatches patch and webcommands.'''
515 Monkeypatches patch and webcommands.'''
515
516
516 try:
517 try:
517 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
518 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
518 or '.hg' in util.splitpath(repo.root)
519 or '.hg' in util.splitpath(repo.root)
519 or repo._url.startswith('bundle:')):
520 or repo._url.startswith('bundle:')):
520 return
521 return
521 except AttributeError:
522 except AttributeError:
522 pass
523 pass
523
524
524 inc, exc = [], ['.hg*']
525 inc, exc = [], ['.hg*']
525 for pat, opt in ui.configitems('keyword'):
526 for pat, opt in ui.configitems('keyword'):
526 if opt != 'ignore':
527 if opt != 'ignore':
527 inc.append(pat)
528 inc.append(pat)
528 else:
529 else:
529 exc.append(pat)
530 exc.append(pat)
530 if not inc:
531 if not inc:
531 return
532 return
532
533
533 kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
534 kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
534
535
535 class kwrepo(repo.__class__):
536 class kwrepo(repo.__class__):
536 def file(self, f):
537 def file(self, f):
537 if f[0] == '/':
538 if f[0] == '/':
538 f = f[1:]
539 f = f[1:]
539 return kwfilelog(self.sopener, kwt, f)
540 return kwfilelog(self.sopener, kwt, f)
540
541
541 def wread(self, filename):
542 def wread(self, filename):
542 data = super(kwrepo, self).wread(filename)
543 data = super(kwrepo, self).wread(filename)
543 return kwt.wread(filename, data)
544 return kwt.wread(filename, data)
544
545
545 def commit(self, *args, **opts):
546 def commit(self, *args, **opts):
546 # use custom commitctx for user commands
547 # use custom commitctx for user commands
547 # other extensions can still wrap repo.commitctx directly
548 # other extensions can still wrap repo.commitctx directly
548 self.commitctx = self.kwcommitctx
549 self.commitctx = self.kwcommitctx
549 try:
550 try:
550 return super(kwrepo, self).commit(*args, **opts)
551 return super(kwrepo, self).commit(*args, **opts)
551 finally:
552 finally:
552 del self.commitctx
553 del self.commitctx
553
554
554 def kwcommitctx(self, ctx, error=False):
555 def kwcommitctx(self, ctx, error=False):
555 n = super(kwrepo, self).commitctx(ctx, error)
556 n = super(kwrepo, self).commitctx(ctx, error)
556 # no lock needed, only called from repo.commit() which already locks
557 # no lock needed, only called from repo.commit() which already locks
557 if not kwt.record:
558 if not kwt.record:
558 restrict = kwt.restrict
559 restrict = kwt.restrict
559 kwt.restrict = True
560 kwt.restrict = True
560 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
561 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
561 False, True)
562 False, True)
562 kwt.restrict = restrict
563 kwt.restrict = restrict
563 return n
564 return n
564
565
565 def rollback(self, dryrun=False):
566 def rollback(self, dryrun=False):
566 wlock = self.wlock()
567 wlock = self.wlock()
567 try:
568 try:
568 if not dryrun:
569 if not dryrun:
569 changed = self['.'].files()
570 changed = self['.'].files()
570 ret = super(kwrepo, self).rollback(dryrun)
571 ret = super(kwrepo, self).rollback(dryrun)
571 if not dryrun:
572 if not dryrun:
572 ctx = self['.']
573 ctx = self['.']
573 modified, added = _preselect(self[None].status(), changed)
574 modified, added = _preselect(self[None].status(), changed)
574 kwt.overwrite(ctx, modified, True, True)
575 kwt.overwrite(ctx, modified, True, True)
575 kwt.overwrite(ctx, added, True, False)
576 kwt.overwrite(ctx, added, True, False)
576 return ret
577 return ret
577 finally:
578 finally:
578 wlock.release()
579 wlock.release()
579
580
580 # monkeypatches
581 # monkeypatches
581 def kwpatchfile_init(orig, self, ui, fname, opener,
582 def kwpatchfile_init(orig, self, ui, fname, opener,
582 missing=False, eolmode=None):
583 missing=False, eolmode=None):
583 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
584 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
584 rejects or conflicts due to expanded keywords in working dir.'''
585 rejects or conflicts due to expanded keywords in working dir.'''
585 orig(self, ui, fname, opener, missing, eolmode)
586 orig(self, ui, fname, opener, missing, eolmode)
586 # shrink keywords read from working dir
587 # shrink keywords read from working dir
587 self.lines = kwt.shrinklines(self.fname, self.lines)
588 self.lines = kwt.shrinklines(self.fname, self.lines)
588
589
589 def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
590 def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
590 opts=None, prefix=''):
591 opts=None, prefix=''):
591 '''Monkeypatch patch.diff to avoid expansion.'''
592 '''Monkeypatch patch.diff to avoid expansion.'''
592 kwt.restrict = True
593 kwt.restrict = True
593 return orig(repo, node1, node2, match, changes, opts, prefix)
594 return orig(repo, node1, node2, match, changes, opts, prefix)
594
595
595 def kwweb_skip(orig, web, req, tmpl):
596 def kwweb_skip(orig, web, req, tmpl):
596 '''Wraps webcommands.x turning off keyword expansion.'''
597 '''Wraps webcommands.x turning off keyword expansion.'''
597 kwt.match = util.never
598 kwt.match = util.never
598 return orig(web, req, tmpl)
599 return orig(web, req, tmpl)
599
600
600 def kw_copy(orig, ui, repo, pats, opts, rename=False):
601 def kw_copy(orig, ui, repo, pats, opts, rename=False):
601 '''Wraps cmdutil.copy so that copy/rename destinations do not
602 '''Wraps cmdutil.copy so that copy/rename destinations do not
602 contain expanded keywords.
603 contain expanded keywords.
603 Note that the source of a regular file destination may also be a
604 Note that the source of a regular file destination may also be a
604 symlink:
605 symlink:
605 hg cp sym x -> x is symlink
606 hg cp sym x -> x is symlink
606 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
607 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
607 For the latter we have to follow the symlink to find out whether its
608 For the latter we have to follow the symlink to find out whether its
608 target is configured for expansion and we therefore must unexpand the
609 target is configured for expansion and we therefore must unexpand the
609 keywords in the destination.'''
610 keywords in the destination.'''
610 orig(ui, repo, pats, opts, rename)
611 orig(ui, repo, pats, opts, rename)
611 if opts.get('dry_run'):
612 if opts.get('dry_run'):
612 return
613 return
613 wctx = repo[None]
614 wctx = repo[None]
614 cwd = repo.getcwd()
615 cwd = repo.getcwd()
615
616
616 def haskwsource(dest):
617 def haskwsource(dest):
617 '''Returns true if dest is a regular file and configured for
618 '''Returns true if dest is a regular file and configured for
618 expansion or a symlink which points to a file configured for
619 expansion or a symlink which points to a file configured for
619 expansion. '''
620 expansion. '''
620 source = repo.dirstate.copied(dest)
621 source = repo.dirstate.copied(dest)
621 if 'l' in wctx.flags(source):
622 if 'l' in wctx.flags(source):
622 source = util.canonpath(repo.root, cwd,
623 source = scmutil.canonpath(repo.root, cwd,
623 os.path.realpath(source))
624 os.path.realpath(source))
624 return kwt.match(source)
625 return kwt.match(source)
625
626
626 candidates = [f for f in repo.dirstate.copies() if
627 candidates = [f for f in repo.dirstate.copies() if
627 not 'l' in wctx.flags(f) and haskwsource(f)]
628 not 'l' in wctx.flags(f) and haskwsource(f)]
628 kwt.overwrite(wctx, candidates, False, False)
629 kwt.overwrite(wctx, candidates, False, False)
629
630
630 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
631 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
631 '''Wraps record.dorecord expanding keywords after recording.'''
632 '''Wraps record.dorecord expanding keywords after recording.'''
632 wlock = repo.wlock()
633 wlock = repo.wlock()
633 try:
634 try:
634 # record returns 0 even when nothing has changed
635 # record returns 0 even when nothing has changed
635 # therefore compare nodes before and after
636 # therefore compare nodes before and after
636 kwt.record = True
637 kwt.record = True
637 ctx = repo['.']
638 ctx = repo['.']
638 wstatus = repo[None].status()
639 wstatus = repo[None].status()
639 ret = orig(ui, repo, commitfunc, *pats, **opts)
640 ret = orig(ui, repo, commitfunc, *pats, **opts)
640 recctx = repo['.']
641 recctx = repo['.']
641 if ctx != recctx:
642 if ctx != recctx:
642 modified, added = _preselect(wstatus, recctx.files())
643 modified, added = _preselect(wstatus, recctx.files())
643 kwt.restrict = False
644 kwt.restrict = False
644 kwt.overwrite(recctx, modified, False, True)
645 kwt.overwrite(recctx, modified, False, True)
645 kwt.overwrite(recctx, added, False, True, True)
646 kwt.overwrite(recctx, added, False, True, True)
646 kwt.restrict = True
647 kwt.restrict = True
647 return ret
648 return ret
648 finally:
649 finally:
649 wlock.release()
650 wlock.release()
650
651
651 def kwfilectx_cmp(orig, self, fctx):
652 def kwfilectx_cmp(orig, self, fctx):
652 # keyword affects data size, comparing wdir and filelog size does
653 # keyword affects data size, comparing wdir and filelog size does
653 # not make sense
654 # not make sense
654 if (fctx._filerev is None and
655 if (fctx._filerev is None and
655 (self._repo._encodefilterpats or
656 (self._repo._encodefilterpats or
656 kwt.match(fctx.path()) and not 'l' in fctx.flags()) or
657 kwt.match(fctx.path()) and not 'l' in fctx.flags()) or
657 self.size() == fctx.size()):
658 self.size() == fctx.size()):
658 return self._filelog.cmp(self._filenode, fctx.data())
659 return self._filelog.cmp(self._filenode, fctx.data())
659 return True
660 return True
660
661
661 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
662 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
662 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
663 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
663 extensions.wrapfunction(patch, 'diff', kw_diff)
664 extensions.wrapfunction(patch, 'diff', kw_diff)
664 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
665 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
665 for c in 'annotate changeset rev filediff diff'.split():
666 for c in 'annotate changeset rev filediff diff'.split():
666 extensions.wrapfunction(webcommands, c, kwweb_skip)
667 extensions.wrapfunction(webcommands, c, kwweb_skip)
667 for name in recordextensions.split():
668 for name in recordextensions.split():
668 try:
669 try:
669 record = extensions.find(name)
670 record = extensions.find(name)
670 extensions.wrapfunction(record, 'dorecord', kw_dorecord)
671 extensions.wrapfunction(record, 'dorecord', kw_dorecord)
671 except KeyError:
672 except KeyError:
672 pass
673 pass
673
674
674 repo.__class__ = kwrepo
675 repo.__class__ = kwrepo
675
676
676 cmdtable = {
677 cmdtable = {
677 'kwdemo':
678 'kwdemo':
678 (demo,
679 (demo,
679 [('d', 'default', None, _('show default keyword template maps')),
680 [('d', 'default', None, _('show default keyword template maps')),
680 ('f', 'rcfile', '',
681 ('f', 'rcfile', '',
681 _('read maps from rcfile'), _('FILE'))],
682 _('read maps from rcfile'), _('FILE'))],
682 _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...')),
683 _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...')),
683 'kwexpand': (expand, commands.walkopts,
684 'kwexpand': (expand, commands.walkopts,
684 _('hg kwexpand [OPTION]... [FILE]...')),
685 _('hg kwexpand [OPTION]... [FILE]...')),
685 'kwfiles':
686 'kwfiles':
686 (files,
687 (files,
687 [('A', 'all', None, _('show keyword status flags of all files')),
688 [('A', 'all', None, _('show keyword status flags of all files')),
688 ('i', 'ignore', None, _('show files excluded from expansion')),
689 ('i', 'ignore', None, _('show files excluded from expansion')),
689 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
690 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
690 ] + commands.walkopts,
691 ] + commands.walkopts,
691 _('hg kwfiles [OPTION]... [FILE]...')),
692 _('hg kwfiles [OPTION]... [FILE]...')),
692 'kwshrink': (shrink, commands.walkopts,
693 'kwshrink': (shrink, commands.walkopts,
693 _('hg kwshrink [OPTION]... [FILE]...')),
694 _('hg kwshrink [OPTION]... [FILE]...')),
694 }
695 }
@@ -1,1391 +1,1391 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, glob, tempfile
10 import os, sys, errno, re, glob, tempfile
11 import util, scmutil, templater, patch, error, encoding, templatekw
11 import util, scmutil, templater, patch, error, encoding, templatekw
12 import match as matchmod
12 import match as matchmod
13 import similar, revset, subrepo
13 import similar, revset, subrepo
14
14
15 revrangesep = ':'
15 revrangesep = ':'
16
16
17 def parsealiases(cmd):
17 def parsealiases(cmd):
18 return cmd.lstrip("^").split("|")
18 return cmd.lstrip("^").split("|")
19
19
20 def findpossible(cmd, table, strict=False):
20 def findpossible(cmd, table, strict=False):
21 """
21 """
22 Return cmd -> (aliases, command table entry)
22 Return cmd -> (aliases, command table entry)
23 for each matching command.
23 for each matching command.
24 Return debug commands (or their aliases) only if no normal command matches.
24 Return debug commands (or their aliases) only if no normal command matches.
25 """
25 """
26 choice = {}
26 choice = {}
27 debugchoice = {}
27 debugchoice = {}
28 for e in table.keys():
28 for e in table.keys():
29 aliases = parsealiases(e)
29 aliases = parsealiases(e)
30 found = None
30 found = None
31 if cmd in aliases:
31 if cmd in aliases:
32 found = cmd
32 found = cmd
33 elif not strict:
33 elif not strict:
34 for a in aliases:
34 for a in aliases:
35 if a.startswith(cmd):
35 if a.startswith(cmd):
36 found = a
36 found = a
37 break
37 break
38 if found is not None:
38 if found is not None:
39 if aliases[0].startswith("debug") or found.startswith("debug"):
39 if aliases[0].startswith("debug") or found.startswith("debug"):
40 debugchoice[found] = (aliases, table[e])
40 debugchoice[found] = (aliases, table[e])
41 else:
41 else:
42 choice[found] = (aliases, table[e])
42 choice[found] = (aliases, table[e])
43
43
44 if not choice and debugchoice:
44 if not choice and debugchoice:
45 choice = debugchoice
45 choice = debugchoice
46
46
47 return choice
47 return choice
48
48
49 def findcmd(cmd, table, strict=True):
49 def findcmd(cmd, table, strict=True):
50 """Return (aliases, command table entry) for command string."""
50 """Return (aliases, command table entry) for command string."""
51 choice = findpossible(cmd, table, strict)
51 choice = findpossible(cmd, table, strict)
52
52
53 if cmd in choice:
53 if cmd in choice:
54 return choice[cmd]
54 return choice[cmd]
55
55
56 if len(choice) > 1:
56 if len(choice) > 1:
57 clist = choice.keys()
57 clist = choice.keys()
58 clist.sort()
58 clist.sort()
59 raise error.AmbiguousCommand(cmd, clist)
59 raise error.AmbiguousCommand(cmd, clist)
60
60
61 if choice:
61 if choice:
62 return choice.values()[0]
62 return choice.values()[0]
63
63
64 raise error.UnknownCommand(cmd)
64 raise error.UnknownCommand(cmd)
65
65
66 def findrepo(p):
66 def findrepo(p):
67 while not os.path.isdir(os.path.join(p, ".hg")):
67 while not os.path.isdir(os.path.join(p, ".hg")):
68 oldp, p = p, os.path.dirname(p)
68 oldp, p = p, os.path.dirname(p)
69 if p == oldp:
69 if p == oldp:
70 return None
70 return None
71
71
72 return p
72 return p
73
73
74 def bail_if_changed(repo):
74 def bail_if_changed(repo):
75 if repo.dirstate.p2() != nullid:
75 if repo.dirstate.p2() != nullid:
76 raise util.Abort(_('outstanding uncommitted merge'))
76 raise util.Abort(_('outstanding uncommitted merge'))
77 modified, added, removed, deleted = repo.status()[:4]
77 modified, added, removed, deleted = repo.status()[:4]
78 if modified or added or removed or deleted:
78 if modified or added or removed or deleted:
79 raise util.Abort(_("outstanding uncommitted changes"))
79 raise util.Abort(_("outstanding uncommitted changes"))
80
80
81 def logmessage(opts):
81 def logmessage(opts):
82 """ get the log message according to -m and -l option """
82 """ get the log message according to -m and -l option """
83 message = opts.get('message')
83 message = opts.get('message')
84 logfile = opts.get('logfile')
84 logfile = opts.get('logfile')
85
85
86 if message and logfile:
86 if message and logfile:
87 raise util.Abort(_('options --message and --logfile are mutually '
87 raise util.Abort(_('options --message and --logfile are mutually '
88 'exclusive'))
88 'exclusive'))
89 if not message and logfile:
89 if not message and logfile:
90 try:
90 try:
91 if logfile == '-':
91 if logfile == '-':
92 message = sys.stdin.read()
92 message = sys.stdin.read()
93 else:
93 else:
94 message = open(logfile).read()
94 message = open(logfile).read()
95 except IOError, inst:
95 except IOError, inst:
96 raise util.Abort(_("can't read commit message '%s': %s") %
96 raise util.Abort(_("can't read commit message '%s': %s") %
97 (logfile, inst.strerror))
97 (logfile, inst.strerror))
98 return message
98 return message
99
99
100 def loglimit(opts):
100 def loglimit(opts):
101 """get the log limit according to option -l/--limit"""
101 """get the log limit according to option -l/--limit"""
102 limit = opts.get('limit')
102 limit = opts.get('limit')
103 if limit:
103 if limit:
104 try:
104 try:
105 limit = int(limit)
105 limit = int(limit)
106 except ValueError:
106 except ValueError:
107 raise util.Abort(_('limit must be a positive integer'))
107 raise util.Abort(_('limit must be a positive integer'))
108 if limit <= 0:
108 if limit <= 0:
109 raise util.Abort(_('limit must be positive'))
109 raise util.Abort(_('limit must be positive'))
110 else:
110 else:
111 limit = None
111 limit = None
112 return limit
112 return limit
113
113
114 def revsingle(repo, revspec, default='.'):
114 def revsingle(repo, revspec, default='.'):
115 if not revspec:
115 if not revspec:
116 return repo[default]
116 return repo[default]
117
117
118 l = revrange(repo, [revspec])
118 l = revrange(repo, [revspec])
119 if len(l) < 1:
119 if len(l) < 1:
120 raise util.Abort(_('empty revision set'))
120 raise util.Abort(_('empty revision set'))
121 return repo[l[-1]]
121 return repo[l[-1]]
122
122
123 def revpair(repo, revs):
123 def revpair(repo, revs):
124 if not revs:
124 if not revs:
125 return repo.dirstate.p1(), None
125 return repo.dirstate.p1(), None
126
126
127 l = revrange(repo, revs)
127 l = revrange(repo, revs)
128
128
129 if len(l) == 0:
129 if len(l) == 0:
130 return repo.dirstate.p1(), None
130 return repo.dirstate.p1(), None
131
131
132 if len(l) == 1:
132 if len(l) == 1:
133 return repo.lookup(l[0]), None
133 return repo.lookup(l[0]), None
134
134
135 return repo.lookup(l[0]), repo.lookup(l[-1])
135 return repo.lookup(l[0]), repo.lookup(l[-1])
136
136
137 def revrange(repo, revs):
137 def revrange(repo, revs):
138 """Yield revision as strings from a list of revision specifications."""
138 """Yield revision as strings from a list of revision specifications."""
139
139
140 def revfix(repo, val, defval):
140 def revfix(repo, val, defval):
141 if not val and val != 0 and defval is not None:
141 if not val and val != 0 and defval is not None:
142 return defval
142 return defval
143 return repo.changelog.rev(repo.lookup(val))
143 return repo.changelog.rev(repo.lookup(val))
144
144
145 seen, l = set(), []
145 seen, l = set(), []
146 for spec in revs:
146 for spec in revs:
147 # attempt to parse old-style ranges first to deal with
147 # attempt to parse old-style ranges first to deal with
148 # things like old-tag which contain query metacharacters
148 # things like old-tag which contain query metacharacters
149 try:
149 try:
150 if isinstance(spec, int):
150 if isinstance(spec, int):
151 seen.add(spec)
151 seen.add(spec)
152 l.append(spec)
152 l.append(spec)
153 continue
153 continue
154
154
155 if revrangesep in spec:
155 if revrangesep in spec:
156 start, end = spec.split(revrangesep, 1)
156 start, end = spec.split(revrangesep, 1)
157 start = revfix(repo, start, 0)
157 start = revfix(repo, start, 0)
158 end = revfix(repo, end, len(repo) - 1)
158 end = revfix(repo, end, len(repo) - 1)
159 step = start > end and -1 or 1
159 step = start > end and -1 or 1
160 for rev in xrange(start, end + step, step):
160 for rev in xrange(start, end + step, step):
161 if rev in seen:
161 if rev in seen:
162 continue
162 continue
163 seen.add(rev)
163 seen.add(rev)
164 l.append(rev)
164 l.append(rev)
165 continue
165 continue
166 elif spec and spec in repo: # single unquoted rev
166 elif spec and spec in repo: # single unquoted rev
167 rev = revfix(repo, spec, None)
167 rev = revfix(repo, spec, None)
168 if rev in seen:
168 if rev in seen:
169 continue
169 continue
170 seen.add(rev)
170 seen.add(rev)
171 l.append(rev)
171 l.append(rev)
172 continue
172 continue
173 except error.RepoLookupError:
173 except error.RepoLookupError:
174 pass
174 pass
175
175
176 # fall through to new-style queries if old-style fails
176 # fall through to new-style queries if old-style fails
177 m = revset.match(spec)
177 m = revset.match(spec)
178 for r in m(repo, range(len(repo))):
178 for r in m(repo, range(len(repo))):
179 if r not in seen:
179 if r not in seen:
180 l.append(r)
180 l.append(r)
181 seen.update(l)
181 seen.update(l)
182
182
183 return l
183 return l
184
184
185 def make_filename(repo, pat, node,
185 def make_filename(repo, pat, node,
186 total=None, seqno=None, revwidth=None, pathname=None):
186 total=None, seqno=None, revwidth=None, pathname=None):
187 node_expander = {
187 node_expander = {
188 'H': lambda: hex(node),
188 'H': lambda: hex(node),
189 'R': lambda: str(repo.changelog.rev(node)),
189 'R': lambda: str(repo.changelog.rev(node)),
190 'h': lambda: short(node),
190 'h': lambda: short(node),
191 }
191 }
192 expander = {
192 expander = {
193 '%': lambda: '%',
193 '%': lambda: '%',
194 'b': lambda: os.path.basename(repo.root),
194 'b': lambda: os.path.basename(repo.root),
195 }
195 }
196
196
197 try:
197 try:
198 if node:
198 if node:
199 expander.update(node_expander)
199 expander.update(node_expander)
200 if node:
200 if node:
201 expander['r'] = (lambda:
201 expander['r'] = (lambda:
202 str(repo.changelog.rev(node)).zfill(revwidth or 0))
202 str(repo.changelog.rev(node)).zfill(revwidth or 0))
203 if total is not None:
203 if total is not None:
204 expander['N'] = lambda: str(total)
204 expander['N'] = lambda: str(total)
205 if seqno is not None:
205 if seqno is not None:
206 expander['n'] = lambda: str(seqno)
206 expander['n'] = lambda: str(seqno)
207 if total is not None and seqno is not None:
207 if total is not None and seqno is not None:
208 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
208 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
209 if pathname is not None:
209 if pathname is not None:
210 expander['s'] = lambda: os.path.basename(pathname)
210 expander['s'] = lambda: os.path.basename(pathname)
211 expander['d'] = lambda: os.path.dirname(pathname) or '.'
211 expander['d'] = lambda: os.path.dirname(pathname) or '.'
212 expander['p'] = lambda: pathname
212 expander['p'] = lambda: pathname
213
213
214 newname = []
214 newname = []
215 patlen = len(pat)
215 patlen = len(pat)
216 i = 0
216 i = 0
217 while i < patlen:
217 while i < patlen:
218 c = pat[i]
218 c = pat[i]
219 if c == '%':
219 if c == '%':
220 i += 1
220 i += 1
221 c = pat[i]
221 c = pat[i]
222 c = expander[c]()
222 c = expander[c]()
223 newname.append(c)
223 newname.append(c)
224 i += 1
224 i += 1
225 return ''.join(newname)
225 return ''.join(newname)
226 except KeyError, inst:
226 except KeyError, inst:
227 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
227 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
228 inst.args[0])
228 inst.args[0])
229
229
230 def make_file(repo, pat, node=None,
230 def make_file(repo, pat, node=None,
231 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
231 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
232
232
233 writable = mode not in ('r', 'rb')
233 writable = mode not in ('r', 'rb')
234
234
235 if not pat or pat == '-':
235 if not pat or pat == '-':
236 fp = writable and sys.stdout or sys.stdin
236 fp = writable and sys.stdout or sys.stdin
237 return os.fdopen(os.dup(fp.fileno()), mode)
237 return os.fdopen(os.dup(fp.fileno()), mode)
238 if hasattr(pat, 'write') and writable:
238 if hasattr(pat, 'write') and writable:
239 return pat
239 return pat
240 if hasattr(pat, 'read') and 'r' in mode:
240 if hasattr(pat, 'read') and 'r' in mode:
241 return pat
241 return pat
242 return open(make_filename(repo, pat, node, total, seqno, revwidth,
242 return open(make_filename(repo, pat, node, total, seqno, revwidth,
243 pathname),
243 pathname),
244 mode)
244 mode)
245
245
246 def expandpats(pats):
246 def expandpats(pats):
247 if not util.expandglobs:
247 if not util.expandglobs:
248 return list(pats)
248 return list(pats)
249 ret = []
249 ret = []
250 for p in pats:
250 for p in pats:
251 kind, name = matchmod._patsplit(p, None)
251 kind, name = matchmod._patsplit(p, None)
252 if kind is None:
252 if kind is None:
253 try:
253 try:
254 globbed = glob.glob(name)
254 globbed = glob.glob(name)
255 except re.error:
255 except re.error:
256 globbed = [name]
256 globbed = [name]
257 if globbed:
257 if globbed:
258 ret.extend(globbed)
258 ret.extend(globbed)
259 continue
259 continue
260 ret.append(p)
260 ret.append(p)
261 return ret
261 return ret
262
262
263 def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
263 def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
264 if pats == ("",):
264 if pats == ("",):
265 pats = []
265 pats = []
266 if not globbed and default == 'relpath':
266 if not globbed and default == 'relpath':
267 pats = expandpats(pats or [])
267 pats = expandpats(pats or [])
268 m = matchmod.match(repo.root, repo.getcwd(), pats,
268 m = matchmod.match(repo.root, repo.getcwd(), pats,
269 opts.get('include'), opts.get('exclude'), default,
269 opts.get('include'), opts.get('exclude'), default,
270 auditor=repo.auditor)
270 auditor=repo.auditor)
271 def badfn(f, msg):
271 def badfn(f, msg):
272 repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
272 repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
273 m.bad = badfn
273 m.bad = badfn
274 return m
274 return m
275
275
276 def matchall(repo):
276 def matchall(repo):
277 return matchmod.always(repo.root, repo.getcwd())
277 return matchmod.always(repo.root, repo.getcwd())
278
278
279 def matchfiles(repo, files):
279 def matchfiles(repo, files):
280 return matchmod.exact(repo.root, repo.getcwd(), files)
280 return matchmod.exact(repo.root, repo.getcwd(), files)
281
281
282 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
282 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
283 if dry_run is None:
283 if dry_run is None:
284 dry_run = opts.get('dry_run')
284 dry_run = opts.get('dry_run')
285 if similarity is None:
285 if similarity is None:
286 similarity = float(opts.get('similarity') or 0)
286 similarity = float(opts.get('similarity') or 0)
287 # we'd use status here, except handling of symlinks and ignore is tricky
287 # we'd use status here, except handling of symlinks and ignore is tricky
288 added, unknown, deleted, removed = [], [], [], []
288 added, unknown, deleted, removed = [], [], [], []
289 audit_path = util.path_auditor(repo.root)
289 audit_path = util.path_auditor(repo.root)
290 m = match(repo, pats, opts)
290 m = match(repo, pats, opts)
291 for abs in repo.walk(m):
291 for abs in repo.walk(m):
292 target = repo.wjoin(abs)
292 target = repo.wjoin(abs)
293 good = True
293 good = True
294 try:
294 try:
295 audit_path(abs)
295 audit_path(abs)
296 except:
296 except:
297 good = False
297 good = False
298 rel = m.rel(abs)
298 rel = m.rel(abs)
299 exact = m.exact(abs)
299 exact = m.exact(abs)
300 if good and abs not in repo.dirstate:
300 if good and abs not in repo.dirstate:
301 unknown.append(abs)
301 unknown.append(abs)
302 if repo.ui.verbose or not exact:
302 if repo.ui.verbose or not exact:
303 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
303 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
304 elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
304 elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
305 or (os.path.isdir(target) and not os.path.islink(target))):
305 or (os.path.isdir(target) and not os.path.islink(target))):
306 deleted.append(abs)
306 deleted.append(abs)
307 if repo.ui.verbose or not exact:
307 if repo.ui.verbose or not exact:
308 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
308 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
309 # for finding renames
309 # for finding renames
310 elif repo.dirstate[abs] == 'r':
310 elif repo.dirstate[abs] == 'r':
311 removed.append(abs)
311 removed.append(abs)
312 elif repo.dirstate[abs] == 'a':
312 elif repo.dirstate[abs] == 'a':
313 added.append(abs)
313 added.append(abs)
314 copies = {}
314 copies = {}
315 if similarity > 0:
315 if similarity > 0:
316 for old, new, score in similar.findrenames(repo,
316 for old, new, score in similar.findrenames(repo,
317 added + unknown, removed + deleted, similarity):
317 added + unknown, removed + deleted, similarity):
318 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
318 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
319 repo.ui.status(_('recording removal of %s as rename to %s '
319 repo.ui.status(_('recording removal of %s as rename to %s '
320 '(%d%% similar)\n') %
320 '(%d%% similar)\n') %
321 (m.rel(old), m.rel(new), score * 100))
321 (m.rel(old), m.rel(new), score * 100))
322 copies[new] = old
322 copies[new] = old
323
323
324 if not dry_run:
324 if not dry_run:
325 wctx = repo[None]
325 wctx = repo[None]
326 wlock = repo.wlock()
326 wlock = repo.wlock()
327 try:
327 try:
328 wctx.remove(deleted)
328 wctx.remove(deleted)
329 wctx.add(unknown)
329 wctx.add(unknown)
330 for new, old in copies.iteritems():
330 for new, old in copies.iteritems():
331 wctx.copy(old, new)
331 wctx.copy(old, new)
332 finally:
332 finally:
333 wlock.release()
333 wlock.release()
334
334
335 def updatedir(ui, repo, patches, similarity=0):
335 def updatedir(ui, repo, patches, similarity=0):
336 '''Update dirstate after patch application according to metadata'''
336 '''Update dirstate after patch application according to metadata'''
337 if not patches:
337 if not patches:
338 return
338 return
339 copies = []
339 copies = []
340 removes = set()
340 removes = set()
341 cfiles = patches.keys()
341 cfiles = patches.keys()
342 cwd = repo.getcwd()
342 cwd = repo.getcwd()
343 if cwd:
343 if cwd:
344 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
344 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
345 for f in patches:
345 for f in patches:
346 gp = patches[f]
346 gp = patches[f]
347 if not gp:
347 if not gp:
348 continue
348 continue
349 if gp.op == 'RENAME':
349 if gp.op == 'RENAME':
350 copies.append((gp.oldpath, gp.path))
350 copies.append((gp.oldpath, gp.path))
351 removes.add(gp.oldpath)
351 removes.add(gp.oldpath)
352 elif gp.op == 'COPY':
352 elif gp.op == 'COPY':
353 copies.append((gp.oldpath, gp.path))
353 copies.append((gp.oldpath, gp.path))
354 elif gp.op == 'DELETE':
354 elif gp.op == 'DELETE':
355 removes.add(gp.path)
355 removes.add(gp.path)
356
356
357 wctx = repo[None]
357 wctx = repo[None]
358 for src, dst in copies:
358 for src, dst in copies:
359 dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
359 dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
360 if (not similarity) and removes:
360 if (not similarity) and removes:
361 wctx.remove(sorted(removes), True)
361 wctx.remove(sorted(removes), True)
362
362
363 for f in patches:
363 for f in patches:
364 gp = patches[f]
364 gp = patches[f]
365 if gp and gp.mode:
365 if gp and gp.mode:
366 islink, isexec = gp.mode
366 islink, isexec = gp.mode
367 dst = repo.wjoin(gp.path)
367 dst = repo.wjoin(gp.path)
368 # patch won't create empty files
368 # patch won't create empty files
369 if gp.op == 'ADD' and not os.path.lexists(dst):
369 if gp.op == 'ADD' and not os.path.lexists(dst):
370 flags = (isexec and 'x' or '') + (islink and 'l' or '')
370 flags = (isexec and 'x' or '') + (islink and 'l' or '')
371 repo.wwrite(gp.path, '', flags)
371 repo.wwrite(gp.path, '', flags)
372 util.set_flags(dst, islink, isexec)
372 util.set_flags(dst, islink, isexec)
373 addremove(repo, cfiles, similarity=similarity)
373 addremove(repo, cfiles, similarity=similarity)
374 files = patches.keys()
374 files = patches.keys()
375 files.extend([r for r in removes if r not in files])
375 files.extend([r for r in removes if r not in files])
376 return sorted(files)
376 return sorted(files)
377
377
378 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
378 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
379 """Update the dirstate to reflect the intent of copying src to dst. For
379 """Update the dirstate to reflect the intent of copying src to dst. For
380 different reasons it might not end with dst being marked as copied from src.
380 different reasons it might not end with dst being marked as copied from src.
381 """
381 """
382 origsrc = repo.dirstate.copied(src) or src
382 origsrc = repo.dirstate.copied(src) or src
383 if dst == origsrc: # copying back a copy?
383 if dst == origsrc: # copying back a copy?
384 if repo.dirstate[dst] not in 'mn' and not dryrun:
384 if repo.dirstate[dst] not in 'mn' and not dryrun:
385 repo.dirstate.normallookup(dst)
385 repo.dirstate.normallookup(dst)
386 else:
386 else:
387 if repo.dirstate[origsrc] == 'a' and origsrc == src:
387 if repo.dirstate[origsrc] == 'a' and origsrc == src:
388 if not ui.quiet:
388 if not ui.quiet:
389 ui.warn(_("%s has not been committed yet, so no copy "
389 ui.warn(_("%s has not been committed yet, so no copy "
390 "data will be stored for %s.\n")
390 "data will be stored for %s.\n")
391 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
391 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
392 if repo.dirstate[dst] in '?r' and not dryrun:
392 if repo.dirstate[dst] in '?r' and not dryrun:
393 wctx.add([dst])
393 wctx.add([dst])
394 elif not dryrun:
394 elif not dryrun:
395 wctx.copy(origsrc, dst)
395 wctx.copy(origsrc, dst)
396
396
def copy(ui, repo, pats, opts, rename=False):
    '''Copy (or, when rename=True, move) files in the working directory.

    The last element of pats is the destination; the remaining elements
    are the sources.  Warnings for individual files are written to ui;
    returns True if at least one copy failed, False otherwise.
    Raises util.Abort for usage errors (no source/destination, bad
    destination for multiple sources).
    '''
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget (hgsep) -> abssrc, used to detect collisions
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]  # working-directory context

    def walkpat(pat):
        # Expand one source pattern into a list of (abs, rel, exact)
        # tuples, warning about unmanaged/removed files named exactly.
        srcs = []
        # with --after a removed ('r') source is acceptable; otherwise not
        badstates = after and '?' or '?r'
        m = match(repo, [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # Perform a single copy/rename; returns True only on I/O failure.
        abstarget = scmutil.canonpath(repo.root, cwd, otarget)
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after only records the operation; the target must already
            # exist on disk for the record to make sense.
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                util.copyfile(src, target)
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        dirstatecopy(ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            wctx.remove([abssrc], not after)

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        # Compute the target-path mapping when sources exist on disk.
        if os.path.isdir(pat):
            abspfx = scmutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                # keep the last path component of the source directory
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        # Like targetpathfn, but for --after: the sources may no longer
        # exist, so the layout is inferred from what exists at dest.
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = scmutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # Score a candidate strip length by how many sources
                    # already have a matching entry under dest.
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
597
597
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    In the parent (--daemon without --daemon-pipefds): re-exec ourselves
    detached, wait for the child to signal readiness by removing a lock
    file, then return parentfn(pid) if given.  In the child (or in
    foreground mode): run initfn, write the pid file if requested,
    detach from the terminal when --daemon-pipefds is set, and finally
    return runfn().
    '''

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    # separate-argument form: drop the flag and its value
                    del runargs[i:i + 2]
                    break
            def condfn():
                # child startup is signalled by removal of the lock file
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
        finally:
            # best-effort cleanup; the child may already have removed it
            try:
                os.unlink(lockpath)
            except OSError, e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if opts['pid_file']:
        # append when collecting pids of several services in one file
        mode = appendpid and 'a' or 'w'
        fp = open(opts['pid_file'], mode)
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    if opts['daemon_pipefds']:
        # we are the detached child: tell the parent we are up, then
        # redirect stdio away from the terminal
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # platform without setsid (e.g. Windows)
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        nullfd = os.open(util.nulldev, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        # stdin <- /dev/null; stdout/stderr -> logfile (or /dev/null)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
669
669
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    One patch per revision in revs is written either to fp (if given)
    or to a file named from template via make_file.  switch_parent
    diffs against the second parent of merges.  opts are passed through
    to patch.diff.
    '''

    total = len(revs)
    # width needed to format the largest revision number in filenames
    revwidth = max([len(str(rev)) for rev in revs])

    def single(rev, seqno, fp):
        # Write the patch for one revision (seqno is 1-based).
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()
        prev = (parents and parents[0]) or nullid

        shouldclose = False
        if not fp:
            fp = make_file(repo, template, node, total=total, seqno=seqno,
                           revwidth=revwidth, mode='ab')
            # only close files we opened ourselves
            if fp != template:
                shouldclose = True
        if fp != sys.stdout and hasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        # standard "# HG changeset patch" header block
        fp.write("# HG changeset patch\n")
        fp.write("# User %s\n" % ctx.user())
        fp.write("# Date %d %d\n" % ctx.date())
        if branch and branch != 'default':
            fp.write("# Branch %s\n" % branch)
        fp.write("# Node ID %s\n" % hex(node))
        fp.write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            fp.write("# Parent %s\n" % hex(parents[1]))
        fp.write(ctx.description().rstrip())
        fp.write("\n\n")

        for chunk in patch.diff(repo, prev, node, opts=opts):
            fp.write(chunk)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
715
715
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   listsubrepos=False):
    '''show diff or diffstat.

    Writes to ui (with labels) or, when fp is given, raw to fp.
    stat=True renders a diffstat instead of the full diff; with
    listsubrepos the diff of each subrepo is appended.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            # fp has no label support; drop the keyword arguments
            fp.write(s)

    if stat:
        # diffstat needs no context lines
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            if node2 is not None:
                # diff against the subrepo revision recorded in ctx2
                node2 = ctx2.substate[subpath][1]
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(diffopts, node2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
751
751
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.patch = patch          # matcher selecting files to diff (or falsy)
        self.diffopts = diffopts    # dict of diff display options
        self.header = {}            # rev -> buffered header text
        self.hunk = {}              # rev -> buffered changeset text
        self.lastheader = None      # avoids repeating an identical header
        self.footer = None

    def flush(self, rev):
        # Emit buffered output for rev; returns 1 if a hunk was written.
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        # In buffered mode, capture the rendering for later flush(rev).
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        # NOTE(review): the field labels below are normally space-padded
        # for column alignment; padding may have been lost in transit of
        # this copy - verify against upstream before relying on it.
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)),
                          label='log.node')
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        # full hashes in debug mode, short ones otherwise
        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
                      label='log.changeset')

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')
        for bookmark in self.repo.nodebookmarks(changenode):
            self.ui.write(_("bookmark: %s\n") % bookmark,
                          label='log.bookmark')
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag: %s\n") % tag,
                          label='log.tag')
        for parent in parents:
            self.ui.write(_("parent: %d:%s\n") % parent,
                          label='log.parent')

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # modified/added/removed from status against first parent
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # first line only in non-verbose mode
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        # Emit diffstat and/or diff for node, per self.diffopts.
        if not matchfn:
            matchfn = self.patch
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if not self.ui.debugflag and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                parents = []
            else:
                parents = [parents[0]]
        return parents
907
907
908
908
909 class changeset_templater(changeset_printer):
909 class changeset_templater(changeset_printer):
910 '''format changeset information.'''
910 '''format changeset information.'''
911
911
    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        # Delegate common state to changeset_printer, then build the
        # templater from mapfile with fallback templates for sub-items.
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        # full node hashes in debug mode, 12-char short form otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        self.cache = {}  # shared template evaluation cache across revisions
926
926
    def use_template(self, t):
        '''set template string to use'''
        # overrides any 'changeset' template loaded from the map file
        self.t.cache['changeset'] = t
930
930
931 def _meaningful_parentrevs(self, ctx):
931 def _meaningful_parentrevs(self, ctx):
932 """Return list of meaningful (or all if debug) parentrevs for rev.
932 """Return list of meaningful (or all if debug) parentrevs for rev.
933 """
933 """
934 parents = ctx.parents()
934 parents = ctx.parents()
935 if len(parents) > 1:
935 if len(parents) > 1:
936 return parents
936 return parents
937 if self.ui.debugflag:
937 if self.ui.debugflag:
938 return [parents[0], self.repo['null']]
938 return [parents[0], self.repo['null']]
939 if parents[0].rev() >= ctx.rev() - 1:
939 if parents[0].rev() >= ctx.rev() - 1:
940 return []
940 return []
941 return parents
941 return parents
942
942
943 def _show(self, ctx, copies, matchfn, props):
943 def _show(self, ctx, copies, matchfn, props):
944 '''show a single changeset or file revision'''
944 '''show a single changeset or file revision'''
945
945
946 showlist = templatekw.showlist
946 showlist = templatekw.showlist
947
947
948 # showparents() behaviour depends on ui trace level which
948 # showparents() behaviour depends on ui trace level which
949 # causes unexpected behaviours at templating level and makes
949 # causes unexpected behaviours at templating level and makes
950 # it harder to extract it in a standalone function. Its
950 # it harder to extract it in a standalone function. Its
951 # behaviour cannot be changed so leave it here for now.
951 # behaviour cannot be changed so leave it here for now.
952 def showparents(**args):
952 def showparents(**args):
953 ctx = args['ctx']
953 ctx = args['ctx']
954 parents = [[('rev', p.rev()), ('node', p.hex())]
954 parents = [[('rev', p.rev()), ('node', p.hex())]
955 for p in self._meaningful_parentrevs(ctx)]
955 for p in self._meaningful_parentrevs(ctx)]
956 return showlist('parent', parents, **args)
956 return showlist('parent', parents, **args)
957
957
958 props = props.copy()
958 props = props.copy()
959 props.update(templatekw.keywords)
959 props.update(templatekw.keywords)
960 props['parents'] = showparents
960 props['parents'] = showparents
961 props['templ'] = self.t
961 props['templ'] = self.t
962 props['ctx'] = ctx
962 props['ctx'] = ctx
963 props['repo'] = self.repo
963 props['repo'] = self.repo
964 props['revcache'] = {'copies': copies}
964 props['revcache'] = {'copies': copies}
965 props['cache'] = self.cache
965 props['cache'] = self.cache
966
966
967 # find correct templates for current mode
967 # find correct templates for current mode
968
968
969 tmplmodes = [
969 tmplmodes = [
970 (True, None),
970 (True, None),
971 (self.ui.verbose, 'verbose'),
971 (self.ui.verbose, 'verbose'),
972 (self.ui.quiet, 'quiet'),
972 (self.ui.quiet, 'quiet'),
973 (self.ui.debugflag, 'debug'),
973 (self.ui.debugflag, 'debug'),
974 ]
974 ]
975
975
976 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
976 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
977 for mode, postfix in tmplmodes:
977 for mode, postfix in tmplmodes:
978 for type in types:
978 for type in types:
979 cur = postfix and ('%s_%s' % (type, postfix)) or type
979 cur = postfix and ('%s_%s' % (type, postfix)) or type
980 if mode and cur in self.t:
980 if mode and cur in self.t:
981 types[type] = cur
981 types[type] = cur
982
982
983 try:
983 try:
984
984
985 # write header
985 # write header
986 if types['header']:
986 if types['header']:
987 h = templater.stringify(self.t(types['header'], **props))
987 h = templater.stringify(self.t(types['header'], **props))
988 if self.buffered:
988 if self.buffered:
989 self.header[ctx.rev()] = h
989 self.header[ctx.rev()] = h
990 else:
990 else:
991 if self.lastheader != h:
991 if self.lastheader != h:
992 self.lastheader = h
992 self.lastheader = h
993 self.ui.write(h)
993 self.ui.write(h)
994
994
995 # write changeset metadata, then patch if requested
995 # write changeset metadata, then patch if requested
996 key = types['changeset']
996 key = types['changeset']
997 self.ui.write(templater.stringify(self.t(key, **props)))
997 self.ui.write(templater.stringify(self.t(key, **props)))
998 self.showpatch(ctx.node(), matchfn)
998 self.showpatch(ctx.node(), matchfn)
999
999
1000 if types['footer']:
1000 if types['footer']:
1001 if not self.footer:
1001 if not self.footer:
1002 self.footer = templater.stringify(self.t(types['footer'],
1002 self.footer = templater.stringify(self.t(types['footer'],
1003 **props))
1003 **props))
1004
1004
1005 except KeyError, inst:
1005 except KeyError, inst:
1006 msg = _("%s: no key named '%s'")
1006 msg = _("%s: no key named '%s'")
1007 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1007 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1008 except SyntaxError, inst:
1008 except SyntaxError, inst:
1009 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1009 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1010
1010
1011 def show_changeset(ui, repo, opts, buffered=False):
1011 def show_changeset(ui, repo, opts, buffered=False):
1012 """show one changeset using template or regular display.
1012 """show one changeset using template or regular display.
1013
1013
1014 Display format will be the first non-empty hit of:
1014 Display format will be the first non-empty hit of:
1015 1. option 'template'
1015 1. option 'template'
1016 2. option 'style'
1016 2. option 'style'
1017 3. [ui] setting 'logtemplate'
1017 3. [ui] setting 'logtemplate'
1018 4. [ui] setting 'style'
1018 4. [ui] setting 'style'
1019 If all of these values are either the unset or the empty string,
1019 If all of these values are either the unset or the empty string,
1020 regular display via changeset_printer() is done.
1020 regular display via changeset_printer() is done.
1021 """
1021 """
1022 # options
1022 # options
1023 patch = False
1023 patch = False
1024 if opts.get('patch') or opts.get('stat'):
1024 if opts.get('patch') or opts.get('stat'):
1025 patch = matchall(repo)
1025 patch = matchall(repo)
1026
1026
1027 tmpl = opts.get('template')
1027 tmpl = opts.get('template')
1028 style = None
1028 style = None
1029 if tmpl:
1029 if tmpl:
1030 tmpl = templater.parsestring(tmpl, quoted=False)
1030 tmpl = templater.parsestring(tmpl, quoted=False)
1031 else:
1031 else:
1032 style = opts.get('style')
1032 style = opts.get('style')
1033
1033
1034 # ui settings
1034 # ui settings
1035 if not (tmpl or style):
1035 if not (tmpl or style):
1036 tmpl = ui.config('ui', 'logtemplate')
1036 tmpl = ui.config('ui', 'logtemplate')
1037 if tmpl:
1037 if tmpl:
1038 tmpl = templater.parsestring(tmpl)
1038 tmpl = templater.parsestring(tmpl)
1039 else:
1039 else:
1040 style = util.expandpath(ui.config('ui', 'style', ''))
1040 style = util.expandpath(ui.config('ui', 'style', ''))
1041
1041
1042 if not (tmpl or style):
1042 if not (tmpl or style):
1043 return changeset_printer(ui, repo, patch, opts, buffered)
1043 return changeset_printer(ui, repo, patch, opts, buffered)
1044
1044
1045 mapfile = None
1045 mapfile = None
1046 if style and not tmpl:
1046 if style and not tmpl:
1047 mapfile = style
1047 mapfile = style
1048 if not os.path.split(mapfile)[0]:
1048 if not os.path.split(mapfile)[0]:
1049 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1049 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1050 or templater.templatepath(mapfile))
1050 or templater.templatepath(mapfile))
1051 if mapname:
1051 if mapname:
1052 mapfile = mapname
1052 mapfile = mapname
1053
1053
1054 try:
1054 try:
1055 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
1055 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
1056 except SyntaxError, inst:
1056 except SyntaxError, inst:
1057 raise util.Abort(inst.args[0])
1057 raise util.Abort(inst.args[0])
1058 if tmpl:
1058 if tmpl:
1059 t.use_template(tmpl)
1059 t.use_template(tmpl)
1060 return t
1060 return t
1061
1061
1062 def finddate(ui, repo, date):
1062 def finddate(ui, repo, date):
1063 """Find the tipmost changeset that matches the given date spec"""
1063 """Find the tipmost changeset that matches the given date spec"""
1064
1064
1065 df = util.matchdate(date)
1065 df = util.matchdate(date)
1066 m = matchall(repo)
1066 m = matchall(repo)
1067 results = {}
1067 results = {}
1068
1068
1069 def prep(ctx, fns):
1069 def prep(ctx, fns):
1070 d = ctx.date()
1070 d = ctx.date()
1071 if df(d[0]):
1071 if df(d[0]):
1072 results[ctx.rev()] = d
1072 results[ctx.rev()] = d
1073
1073
1074 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1074 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1075 rev = ctx.rev()
1075 rev = ctx.rev()
1076 if rev in results:
1076 if rev in results:
1077 ui.status(_("Found revision %s from %s\n") %
1077 ui.status(_("Found revision %s from %s\n") %
1078 (rev, util.datestr(results[rev])))
1078 (rev, util.datestr(results[rev])))
1079 return str(rev)
1079 return str(rev)
1080
1080
1081 raise util.Abort(_("revision matching date not found"))
1081 raise util.Abort(_("revision matching date not found"))
1082
1082
1083 def walkchangerevs(repo, match, opts, prepare):
1083 def walkchangerevs(repo, match, opts, prepare):
1084 '''Iterate over files and the revs in which they changed.
1084 '''Iterate over files and the revs in which they changed.
1085
1085
1086 Callers most commonly need to iterate backwards over the history
1086 Callers most commonly need to iterate backwards over the history
1087 in which they are interested. Doing so has awful (quadratic-looking)
1087 in which they are interested. Doing so has awful (quadratic-looking)
1088 performance, so we use iterators in a "windowed" way.
1088 performance, so we use iterators in a "windowed" way.
1089
1089
1090 We walk a window of revisions in the desired order. Within the
1090 We walk a window of revisions in the desired order. Within the
1091 window, we first walk forwards to gather data, then in the desired
1091 window, we first walk forwards to gather data, then in the desired
1092 order (usually backwards) to display it.
1092 order (usually backwards) to display it.
1093
1093
1094 This function returns an iterator yielding contexts. Before
1094 This function returns an iterator yielding contexts. Before
1095 yielding each context, the iterator will first call the prepare
1095 yielding each context, the iterator will first call the prepare
1096 function on each context in the window in forward order.'''
1096 function on each context in the window in forward order.'''
1097
1097
1098 def increasing_windows(start, end, windowsize=8, sizelimit=512):
1098 def increasing_windows(start, end, windowsize=8, sizelimit=512):
1099 if start < end:
1099 if start < end:
1100 while start < end:
1100 while start < end:
1101 yield start, min(windowsize, end - start)
1101 yield start, min(windowsize, end - start)
1102 start += windowsize
1102 start += windowsize
1103 if windowsize < sizelimit:
1103 if windowsize < sizelimit:
1104 windowsize *= 2
1104 windowsize *= 2
1105 else:
1105 else:
1106 while start > end:
1106 while start > end:
1107 yield start, min(windowsize, start - end - 1)
1107 yield start, min(windowsize, start - end - 1)
1108 start -= windowsize
1108 start -= windowsize
1109 if windowsize < sizelimit:
1109 if windowsize < sizelimit:
1110 windowsize *= 2
1110 windowsize *= 2
1111
1111
1112 follow = opts.get('follow') or opts.get('follow_first')
1112 follow = opts.get('follow') or opts.get('follow_first')
1113
1113
1114 if not len(repo):
1114 if not len(repo):
1115 return []
1115 return []
1116
1116
1117 if follow:
1117 if follow:
1118 defrange = '%s:0' % repo['.'].rev()
1118 defrange = '%s:0' % repo['.'].rev()
1119 else:
1119 else:
1120 defrange = '-1:0'
1120 defrange = '-1:0'
1121 revs = revrange(repo, opts['rev'] or [defrange])
1121 revs = revrange(repo, opts['rev'] or [defrange])
1122 if not revs:
1122 if not revs:
1123 return []
1123 return []
1124 wanted = set()
1124 wanted = set()
1125 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1125 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1126 fncache = {}
1126 fncache = {}
1127 change = util.cachefunc(repo.changectx)
1127 change = util.cachefunc(repo.changectx)
1128
1128
1129 # First step is to fill wanted, the set of revisions that we want to yield.
1129 # First step is to fill wanted, the set of revisions that we want to yield.
1130 # When it does not induce extra cost, we also fill fncache for revisions in
1130 # When it does not induce extra cost, we also fill fncache for revisions in
1131 # wanted: a cache of filenames that were changed (ctx.files()) and that
1131 # wanted: a cache of filenames that were changed (ctx.files()) and that
1132 # match the file filtering conditions.
1132 # match the file filtering conditions.
1133
1133
1134 if not slowpath and not match.files():
1134 if not slowpath and not match.files():
1135 # No files, no patterns. Display all revs.
1135 # No files, no patterns. Display all revs.
1136 wanted = set(revs)
1136 wanted = set(revs)
1137 copies = []
1137 copies = []
1138
1138
1139 if not slowpath:
1139 if not slowpath:
1140 # We only have to read through the filelog to find wanted revisions
1140 # We only have to read through the filelog to find wanted revisions
1141
1141
1142 minrev, maxrev = min(revs), max(revs)
1142 minrev, maxrev = min(revs), max(revs)
1143 def filerevgen(filelog, last):
1143 def filerevgen(filelog, last):
1144 """
1144 """
1145 Only files, no patterns. Check the history of each file.
1145 Only files, no patterns. Check the history of each file.
1146
1146
1147 Examines filelog entries within minrev, maxrev linkrev range
1147 Examines filelog entries within minrev, maxrev linkrev range
1148 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1148 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1149 tuples in backwards order
1149 tuples in backwards order
1150 """
1150 """
1151 cl_count = len(repo)
1151 cl_count = len(repo)
1152 revs = []
1152 revs = []
1153 for j in xrange(0, last + 1):
1153 for j in xrange(0, last + 1):
1154 linkrev = filelog.linkrev(j)
1154 linkrev = filelog.linkrev(j)
1155 if linkrev < minrev:
1155 if linkrev < minrev:
1156 continue
1156 continue
1157 # only yield rev for which we have the changelog, it can
1157 # only yield rev for which we have the changelog, it can
1158 # happen while doing "hg log" during a pull or commit
1158 # happen while doing "hg log" during a pull or commit
1159 if linkrev >= cl_count:
1159 if linkrev >= cl_count:
1160 break
1160 break
1161
1161
1162 parentlinkrevs = []
1162 parentlinkrevs = []
1163 for p in filelog.parentrevs(j):
1163 for p in filelog.parentrevs(j):
1164 if p != nullrev:
1164 if p != nullrev:
1165 parentlinkrevs.append(filelog.linkrev(p))
1165 parentlinkrevs.append(filelog.linkrev(p))
1166 n = filelog.node(j)
1166 n = filelog.node(j)
1167 revs.append((linkrev, parentlinkrevs,
1167 revs.append((linkrev, parentlinkrevs,
1168 follow and filelog.renamed(n)))
1168 follow and filelog.renamed(n)))
1169
1169
1170 return reversed(revs)
1170 return reversed(revs)
1171 def iterfiles():
1171 def iterfiles():
1172 for filename in match.files():
1172 for filename in match.files():
1173 yield filename, None
1173 yield filename, None
1174 for filename_node in copies:
1174 for filename_node in copies:
1175 yield filename_node
1175 yield filename_node
1176 for file_, node in iterfiles():
1176 for file_, node in iterfiles():
1177 filelog = repo.file(file_)
1177 filelog = repo.file(file_)
1178 if not len(filelog):
1178 if not len(filelog):
1179 if node is None:
1179 if node is None:
1180 # A zero count may be a directory or deleted file, so
1180 # A zero count may be a directory or deleted file, so
1181 # try to find matching entries on the slow path.
1181 # try to find matching entries on the slow path.
1182 if follow:
1182 if follow:
1183 raise util.Abort(
1183 raise util.Abort(
1184 _('cannot follow nonexistent file: "%s"') % file_)
1184 _('cannot follow nonexistent file: "%s"') % file_)
1185 slowpath = True
1185 slowpath = True
1186 break
1186 break
1187 else:
1187 else:
1188 continue
1188 continue
1189
1189
1190 if node is None:
1190 if node is None:
1191 last = len(filelog) - 1
1191 last = len(filelog) - 1
1192 else:
1192 else:
1193 last = filelog.rev(node)
1193 last = filelog.rev(node)
1194
1194
1195
1195
1196 # keep track of all ancestors of the file
1196 # keep track of all ancestors of the file
1197 ancestors = set([filelog.linkrev(last)])
1197 ancestors = set([filelog.linkrev(last)])
1198
1198
1199 # iterate from latest to oldest revision
1199 # iterate from latest to oldest revision
1200 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1200 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1201 if not follow:
1201 if not follow:
1202 if rev > maxrev:
1202 if rev > maxrev:
1203 continue
1203 continue
1204 else:
1204 else:
1205 # Note that last might not be the first interesting
1205 # Note that last might not be the first interesting
1206 # rev to us:
1206 # rev to us:
1207 # if the file has been changed after maxrev, we'll
1207 # if the file has been changed after maxrev, we'll
1208 # have linkrev(last) > maxrev, and we still need
1208 # have linkrev(last) > maxrev, and we still need
1209 # to explore the file graph
1209 # to explore the file graph
1210 if rev not in ancestors:
1210 if rev not in ancestors:
1211 continue
1211 continue
1212 # XXX insert 1327 fix here
1212 # XXX insert 1327 fix here
1213 if flparentlinkrevs:
1213 if flparentlinkrevs:
1214 ancestors.update(flparentlinkrevs)
1214 ancestors.update(flparentlinkrevs)
1215
1215
1216 fncache.setdefault(rev, []).append(file_)
1216 fncache.setdefault(rev, []).append(file_)
1217 wanted.add(rev)
1217 wanted.add(rev)
1218 if copied:
1218 if copied:
1219 copies.append(copied)
1219 copies.append(copied)
1220 if slowpath:
1220 if slowpath:
1221 # We have to read the changelog to match filenames against
1221 # We have to read the changelog to match filenames against
1222 # changed files
1222 # changed files
1223
1223
1224 if follow:
1224 if follow:
1225 raise util.Abort(_('can only follow copies/renames for explicit '
1225 raise util.Abort(_('can only follow copies/renames for explicit '
1226 'filenames'))
1226 'filenames'))
1227
1227
1228 # The slow path checks files modified in every changeset.
1228 # The slow path checks files modified in every changeset.
1229 for i in sorted(revs):
1229 for i in sorted(revs):
1230 ctx = change(i)
1230 ctx = change(i)
1231 matches = filter(match, ctx.files())
1231 matches = filter(match, ctx.files())
1232 if matches:
1232 if matches:
1233 fncache[i] = matches
1233 fncache[i] = matches
1234 wanted.add(i)
1234 wanted.add(i)
1235
1235
1236 class followfilter(object):
1236 class followfilter(object):
1237 def __init__(self, onlyfirst=False):
1237 def __init__(self, onlyfirst=False):
1238 self.startrev = nullrev
1238 self.startrev = nullrev
1239 self.roots = set()
1239 self.roots = set()
1240 self.onlyfirst = onlyfirst
1240 self.onlyfirst = onlyfirst
1241
1241
1242 def match(self, rev):
1242 def match(self, rev):
1243 def realparents(rev):
1243 def realparents(rev):
1244 if self.onlyfirst:
1244 if self.onlyfirst:
1245 return repo.changelog.parentrevs(rev)[0:1]
1245 return repo.changelog.parentrevs(rev)[0:1]
1246 else:
1246 else:
1247 return filter(lambda x: x != nullrev,
1247 return filter(lambda x: x != nullrev,
1248 repo.changelog.parentrevs(rev))
1248 repo.changelog.parentrevs(rev))
1249
1249
1250 if self.startrev == nullrev:
1250 if self.startrev == nullrev:
1251 self.startrev = rev
1251 self.startrev = rev
1252 return True
1252 return True
1253
1253
1254 if rev > self.startrev:
1254 if rev > self.startrev:
1255 # forward: all descendants
1255 # forward: all descendants
1256 if not self.roots:
1256 if not self.roots:
1257 self.roots.add(self.startrev)
1257 self.roots.add(self.startrev)
1258 for parent in realparents(rev):
1258 for parent in realparents(rev):
1259 if parent in self.roots:
1259 if parent in self.roots:
1260 self.roots.add(rev)
1260 self.roots.add(rev)
1261 return True
1261 return True
1262 else:
1262 else:
1263 # backwards: all parents
1263 # backwards: all parents
1264 if not self.roots:
1264 if not self.roots:
1265 self.roots.update(realparents(self.startrev))
1265 self.roots.update(realparents(self.startrev))
1266 if rev in self.roots:
1266 if rev in self.roots:
1267 self.roots.remove(rev)
1267 self.roots.remove(rev)
1268 self.roots.update(realparents(rev))
1268 self.roots.update(realparents(rev))
1269 return True
1269 return True
1270
1270
1271 return False
1271 return False
1272
1272
1273 # it might be worthwhile to do this in the iterator if the rev range
1273 # it might be worthwhile to do this in the iterator if the rev range
1274 # is descending and the prune args are all within that range
1274 # is descending and the prune args are all within that range
1275 for rev in opts.get('prune', ()):
1275 for rev in opts.get('prune', ()):
1276 rev = repo.changelog.rev(repo.lookup(rev))
1276 rev = repo.changelog.rev(repo.lookup(rev))
1277 ff = followfilter()
1277 ff = followfilter()
1278 stop = min(revs[0], revs[-1])
1278 stop = min(revs[0], revs[-1])
1279 for x in xrange(rev, stop - 1, -1):
1279 for x in xrange(rev, stop - 1, -1):
1280 if ff.match(x):
1280 if ff.match(x):
1281 wanted.discard(x)
1281 wanted.discard(x)
1282
1282
1283 # Now that wanted is correctly initialized, we can iterate over the
1283 # Now that wanted is correctly initialized, we can iterate over the
1284 # revision range, yielding only revisions in wanted.
1284 # revision range, yielding only revisions in wanted.
1285 def iterate():
1285 def iterate():
1286 if follow and not match.files():
1286 if follow and not match.files():
1287 ff = followfilter(onlyfirst=opts.get('follow_first'))
1287 ff = followfilter(onlyfirst=opts.get('follow_first'))
1288 def want(rev):
1288 def want(rev):
1289 return ff.match(rev) and rev in wanted
1289 return ff.match(rev) and rev in wanted
1290 else:
1290 else:
1291 def want(rev):
1291 def want(rev):
1292 return rev in wanted
1292 return rev in wanted
1293
1293
1294 for i, window in increasing_windows(0, len(revs)):
1294 for i, window in increasing_windows(0, len(revs)):
1295 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1295 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1296 for rev in sorted(nrevs):
1296 for rev in sorted(nrevs):
1297 fns = fncache.get(rev)
1297 fns = fncache.get(rev)
1298 ctx = change(rev)
1298 ctx = change(rev)
1299 if not fns:
1299 if not fns:
1300 def fns_generator():
1300 def fns_generator():
1301 for f in ctx.files():
1301 for f in ctx.files():
1302 if match(f):
1302 if match(f):
1303 yield f
1303 yield f
1304 fns = fns_generator()
1304 fns = fns_generator()
1305 prepare(ctx, fns)
1305 prepare(ctx, fns)
1306 for rev in nrevs:
1306 for rev in nrevs:
1307 yield change(rev)
1307 yield change(rev)
1308 return iterate()
1308 return iterate()
1309
1309
1310 def add(ui, repo, match, dryrun, listsubrepos, prefix):
1310 def add(ui, repo, match, dryrun, listsubrepos, prefix):
1311 join = lambda f: os.path.join(prefix, f)
1311 join = lambda f: os.path.join(prefix, f)
1312 bad = []
1312 bad = []
1313 oldbad = match.bad
1313 oldbad = match.bad
1314 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1314 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1315 names = []
1315 names = []
1316 wctx = repo[None]
1316 wctx = repo[None]
1317 for f in repo.walk(match):
1317 for f in repo.walk(match):
1318 exact = match.exact(f)
1318 exact = match.exact(f)
1319 if exact or f not in repo.dirstate:
1319 if exact or f not in repo.dirstate:
1320 names.append(f)
1320 names.append(f)
1321 if ui.verbose or not exact:
1321 if ui.verbose or not exact:
1322 ui.status(_('adding %s\n') % match.rel(join(f)))
1322 ui.status(_('adding %s\n') % match.rel(join(f)))
1323
1323
1324 if listsubrepos:
1324 if listsubrepos:
1325 for subpath in wctx.substate:
1325 for subpath in wctx.substate:
1326 sub = wctx.sub(subpath)
1326 sub = wctx.sub(subpath)
1327 try:
1327 try:
1328 submatch = matchmod.narrowmatcher(subpath, match)
1328 submatch = matchmod.narrowmatcher(subpath, match)
1329 bad.extend(sub.add(ui, submatch, dryrun, prefix))
1329 bad.extend(sub.add(ui, submatch, dryrun, prefix))
1330 except error.LookupError:
1330 except error.LookupError:
1331 ui.status(_("skipping missing subrepository: %s\n")
1331 ui.status(_("skipping missing subrepository: %s\n")
1332 % join(subpath))
1332 % join(subpath))
1333
1333
1334 if not dryrun:
1334 if not dryrun:
1335 rejected = wctx.add(names, prefix)
1335 rejected = wctx.add(names, prefix)
1336 bad.extend(f for f in rejected if f in match.files())
1336 bad.extend(f for f in rejected if f in match.files())
1337 return bad
1337 return bad
1338
1338
1339 def commit(ui, repo, commitfunc, pats, opts):
1339 def commit(ui, repo, commitfunc, pats, opts):
1340 '''commit the specified files or all outstanding changes'''
1340 '''commit the specified files or all outstanding changes'''
1341 date = opts.get('date')
1341 date = opts.get('date')
1342 if date:
1342 if date:
1343 opts['date'] = util.parsedate(date)
1343 opts['date'] = util.parsedate(date)
1344 message = logmessage(opts)
1344 message = logmessage(opts)
1345
1345
1346 # extract addremove carefully -- this function can be called from a command
1346 # extract addremove carefully -- this function can be called from a command
1347 # that doesn't support addremove
1347 # that doesn't support addremove
1348 if opts.get('addremove'):
1348 if opts.get('addremove'):
1349 addremove(repo, pats, opts)
1349 addremove(repo, pats, opts)
1350
1350
1351 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1351 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1352
1352
1353 def commiteditor(repo, ctx, subs):
1353 def commiteditor(repo, ctx, subs):
1354 if ctx.description():
1354 if ctx.description():
1355 return ctx.description()
1355 return ctx.description()
1356 return commitforceeditor(repo, ctx, subs)
1356 return commitforceeditor(repo, ctx, subs)
1357
1357
1358 def commitforceeditor(repo, ctx, subs):
1358 def commitforceeditor(repo, ctx, subs):
1359 edittext = []
1359 edittext = []
1360 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1360 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1361 if ctx.description():
1361 if ctx.description():
1362 edittext.append(ctx.description())
1362 edittext.append(ctx.description())
1363 edittext.append("")
1363 edittext.append("")
1364 edittext.append("") # Empty line between message and comments.
1364 edittext.append("") # Empty line between message and comments.
1365 edittext.append(_("HG: Enter commit message."
1365 edittext.append(_("HG: Enter commit message."
1366 " Lines beginning with 'HG:' are removed."))
1366 " Lines beginning with 'HG:' are removed."))
1367 edittext.append(_("HG: Leave message empty to abort commit."))
1367 edittext.append(_("HG: Leave message empty to abort commit."))
1368 edittext.append("HG: --")
1368 edittext.append("HG: --")
1369 edittext.append(_("HG: user: %s") % ctx.user())
1369 edittext.append(_("HG: user: %s") % ctx.user())
1370 if ctx.p2():
1370 if ctx.p2():
1371 edittext.append(_("HG: branch merge"))
1371 edittext.append(_("HG: branch merge"))
1372 if ctx.branch():
1372 if ctx.branch():
1373 edittext.append(_("HG: branch '%s'") % ctx.branch())
1373 edittext.append(_("HG: branch '%s'") % ctx.branch())
1374 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1374 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1375 edittext.extend([_("HG: added %s") % f for f in added])
1375 edittext.extend([_("HG: added %s") % f for f in added])
1376 edittext.extend([_("HG: changed %s") % f for f in modified])
1376 edittext.extend([_("HG: changed %s") % f for f in modified])
1377 edittext.extend([_("HG: removed %s") % f for f in removed])
1377 edittext.extend([_("HG: removed %s") % f for f in removed])
1378 if not added and not modified and not removed:
1378 if not added and not modified and not removed:
1379 edittext.append(_("HG: no files changed"))
1379 edittext.append(_("HG: no files changed"))
1380 edittext.append("")
1380 edittext.append("")
1381 # run editor in the repository root
1381 # run editor in the repository root
1382 olddir = os.getcwd()
1382 olddir = os.getcwd()
1383 os.chdir(repo.root)
1383 os.chdir(repo.root)
1384 text = repo.ui.edit("\n".join(edittext), ctx.user())
1384 text = repo.ui.edit("\n".join(edittext), ctx.user())
1385 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1385 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1386 os.chdir(olddir)
1386 os.chdir(olddir)
1387
1387
1388 if not text.strip():
1388 if not text.strip():
1389 raise util.Abort(_("empty commit message"))
1389 raise util.Abort(_("empty commit message"))
1390
1390
1391 return text
1391 return text
@@ -1,233 +1,233 b''
1 # hgweb/webutil.py - utility library for the web interface.
1 # hgweb/webutil.py - utility library for the web interface.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import os, copy
9 import os, copy
10 from mercurial import match, patch, util, error, ui
10 from mercurial import match, patch, scmutil, error, ui
11 from mercurial.node import hex, nullid
11 from mercurial.node import hex, nullid
12
12
13 def up(p):
13 def up(p):
14 if p[0] != "/":
14 if p[0] != "/":
15 p = "/" + p
15 p = "/" + p
16 if p[-1] == "/":
16 if p[-1] == "/":
17 p = p[:-1]
17 p = p[:-1]
18 up = os.path.dirname(p)
18 up = os.path.dirname(p)
19 if up == "/":
19 if up == "/":
20 return "/"
20 return "/"
21 return up + "/"
21 return up + "/"
22
22
23 def revnavgen(pos, pagelen, limit, nodefunc):
23 def revnavgen(pos, pagelen, limit, nodefunc):
24 def seq(factor, limit=None):
24 def seq(factor, limit=None):
25 if limit:
25 if limit:
26 yield limit
26 yield limit
27 if limit >= 20 and limit <= 40:
27 if limit >= 20 and limit <= 40:
28 yield 50
28 yield 50
29 else:
29 else:
30 yield 1 * factor
30 yield 1 * factor
31 yield 3 * factor
31 yield 3 * factor
32 for f in seq(factor * 10):
32 for f in seq(factor * 10):
33 yield f
33 yield f
34
34
35 navbefore = []
35 navbefore = []
36 navafter = []
36 navafter = []
37
37
38 last = 0
38 last = 0
39 for f in seq(1, pagelen):
39 for f in seq(1, pagelen):
40 if f < pagelen or f <= last:
40 if f < pagelen or f <= last:
41 continue
41 continue
42 if f > limit:
42 if f > limit:
43 break
43 break
44 last = f
44 last = f
45 if pos + f < limit:
45 if pos + f < limit:
46 navafter.append(("+%d" % f, hex(nodefunc(pos + f).node())))
46 navafter.append(("+%d" % f, hex(nodefunc(pos + f).node())))
47 if pos - f >= 0:
47 if pos - f >= 0:
48 navbefore.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node())))
48 navbefore.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node())))
49
49
50 navafter.append(("tip", "tip"))
50 navafter.append(("tip", "tip"))
51 try:
51 try:
52 navbefore.insert(0, ("(0)", hex(nodefunc('0').node())))
52 navbefore.insert(0, ("(0)", hex(nodefunc('0').node())))
53 except error.RepoError:
53 except error.RepoError:
54 pass
54 pass
55
55
56 def gen(l):
56 def gen(l):
57 def f(**map):
57 def f(**map):
58 for label, node in l:
58 for label, node in l:
59 yield {"label": label, "node": node}
59 yield {"label": label, "node": node}
60 return f
60 return f
61
61
62 return (dict(before=gen(navbefore), after=gen(navafter)),)
62 return (dict(before=gen(navbefore), after=gen(navafter)),)
63
63
64 def _siblings(siblings=[], hiderev=None):
64 def _siblings(siblings=[], hiderev=None):
65 siblings = [s for s in siblings if s.node() != nullid]
65 siblings = [s for s in siblings if s.node() != nullid]
66 if len(siblings) == 1 and siblings[0].rev() == hiderev:
66 if len(siblings) == 1 and siblings[0].rev() == hiderev:
67 return
67 return
68 for s in siblings:
68 for s in siblings:
69 d = {'node': hex(s.node()), 'rev': s.rev()}
69 d = {'node': hex(s.node()), 'rev': s.rev()}
70 d['user'] = s.user()
70 d['user'] = s.user()
71 d['date'] = s.date()
71 d['date'] = s.date()
72 d['description'] = s.description()
72 d['description'] = s.description()
73 d['branch'] = s.branch()
73 d['branch'] = s.branch()
74 if hasattr(s, 'path'):
74 if hasattr(s, 'path'):
75 d['file'] = s.path()
75 d['file'] = s.path()
76 yield d
76 yield d
77
77
78 def parents(ctx, hide=None):
78 def parents(ctx, hide=None):
79 return _siblings(ctx.parents(), hide)
79 return _siblings(ctx.parents(), hide)
80
80
81 def children(ctx, hide=None):
81 def children(ctx, hide=None):
82 return _siblings(ctx.children(), hide)
82 return _siblings(ctx.children(), hide)
83
83
84 def renamelink(fctx):
84 def renamelink(fctx):
85 r = fctx.renamed()
85 r = fctx.renamed()
86 if r:
86 if r:
87 return [dict(file=r[0], node=hex(r[1]))]
87 return [dict(file=r[0], node=hex(r[1]))]
88 return []
88 return []
89
89
90 def nodetagsdict(repo, node):
90 def nodetagsdict(repo, node):
91 return [{"name": i} for i in repo.nodetags(node)]
91 return [{"name": i} for i in repo.nodetags(node)]
92
92
93 def nodebookmarksdict(repo, node):
93 def nodebookmarksdict(repo, node):
94 return [{"name": i} for i in repo.nodebookmarks(node)]
94 return [{"name": i} for i in repo.nodebookmarks(node)]
95
95
96 def nodebranchdict(repo, ctx):
96 def nodebranchdict(repo, ctx):
97 branches = []
97 branches = []
98 branch = ctx.branch()
98 branch = ctx.branch()
99 # If this is an empty repo, ctx.node() == nullid,
99 # If this is an empty repo, ctx.node() == nullid,
100 # ctx.branch() == 'default', but branchtags() is
100 # ctx.branch() == 'default', but branchtags() is
101 # an empty dict. Using dict.get avoids a traceback.
101 # an empty dict. Using dict.get avoids a traceback.
102 if repo.branchtags().get(branch) == ctx.node():
102 if repo.branchtags().get(branch) == ctx.node():
103 branches.append({"name": branch})
103 branches.append({"name": branch})
104 return branches
104 return branches
105
105
106 def nodeinbranch(repo, ctx):
106 def nodeinbranch(repo, ctx):
107 branches = []
107 branches = []
108 branch = ctx.branch()
108 branch = ctx.branch()
109 if branch != 'default' and repo.branchtags().get(branch) != ctx.node():
109 if branch != 'default' and repo.branchtags().get(branch) != ctx.node():
110 branches.append({"name": branch})
110 branches.append({"name": branch})
111 return branches
111 return branches
112
112
113 def nodebranchnodefault(ctx):
113 def nodebranchnodefault(ctx):
114 branches = []
114 branches = []
115 branch = ctx.branch()
115 branch = ctx.branch()
116 if branch != 'default':
116 if branch != 'default':
117 branches.append({"name": branch})
117 branches.append({"name": branch})
118 return branches
118 return branches
119
119
120 def showtag(repo, tmpl, t1, node=nullid, **args):
120 def showtag(repo, tmpl, t1, node=nullid, **args):
121 for t in repo.nodetags(node):
121 for t in repo.nodetags(node):
122 yield tmpl(t1, tag=t, **args)
122 yield tmpl(t1, tag=t, **args)
123
123
124 def showbookmark(repo, tmpl, t1, node=nullid, **args):
124 def showbookmark(repo, tmpl, t1, node=nullid, **args):
125 for t in repo.nodebookmarks(node):
125 for t in repo.nodebookmarks(node):
126 yield tmpl(t1, bookmark=t, **args)
126 yield tmpl(t1, bookmark=t, **args)
127
127
128 def cleanpath(repo, path):
128 def cleanpath(repo, path):
129 path = path.lstrip('/')
129 path = path.lstrip('/')
130 return util.canonpath(repo.root, '', path)
130 return scmutil.canonpath(repo.root, '', path)
131
131
132 def changectx(repo, req):
132 def changectx(repo, req):
133 changeid = "tip"
133 changeid = "tip"
134 if 'node' in req.form:
134 if 'node' in req.form:
135 changeid = req.form['node'][0]
135 changeid = req.form['node'][0]
136 elif 'manifest' in req.form:
136 elif 'manifest' in req.form:
137 changeid = req.form['manifest'][0]
137 changeid = req.form['manifest'][0]
138
138
139 try:
139 try:
140 ctx = repo[changeid]
140 ctx = repo[changeid]
141 except error.RepoError:
141 except error.RepoError:
142 man = repo.manifest
142 man = repo.manifest
143 ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]
143 ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]
144
144
145 return ctx
145 return ctx
146
146
147 def filectx(repo, req):
147 def filectx(repo, req):
148 path = cleanpath(repo, req.form['file'][0])
148 path = cleanpath(repo, req.form['file'][0])
149 if 'node' in req.form:
149 if 'node' in req.form:
150 changeid = req.form['node'][0]
150 changeid = req.form['node'][0]
151 else:
151 else:
152 changeid = req.form['filenode'][0]
152 changeid = req.form['filenode'][0]
153 try:
153 try:
154 fctx = repo[changeid][path]
154 fctx = repo[changeid][path]
155 except error.RepoError:
155 except error.RepoError:
156 fctx = repo.filectx(path, fileid=changeid)
156 fctx = repo.filectx(path, fileid=changeid)
157
157
158 return fctx
158 return fctx
159
159
160 def listfilediffs(tmpl, files, node, max):
160 def listfilediffs(tmpl, files, node, max):
161 for f in files[:max]:
161 for f in files[:max]:
162 yield tmpl('filedifflink', node=hex(node), file=f)
162 yield tmpl('filedifflink', node=hex(node), file=f)
163 if len(files) > max:
163 if len(files) > max:
164 yield tmpl('fileellipses')
164 yield tmpl('fileellipses')
165
165
166 def diffs(repo, tmpl, ctx, files, parity, style):
166 def diffs(repo, tmpl, ctx, files, parity, style):
167
167
168 def countgen():
168 def countgen():
169 start = 1
169 start = 1
170 while True:
170 while True:
171 yield start
171 yield start
172 start += 1
172 start += 1
173
173
174 blockcount = countgen()
174 blockcount = countgen()
175 def prettyprintlines(diff):
175 def prettyprintlines(diff):
176 blockno = blockcount.next()
176 blockno = blockcount.next()
177 for lineno, l in enumerate(diff.splitlines(True)):
177 for lineno, l in enumerate(diff.splitlines(True)):
178 lineno = "%d.%d" % (blockno, lineno + 1)
178 lineno = "%d.%d" % (blockno, lineno + 1)
179 if l.startswith('+'):
179 if l.startswith('+'):
180 ltype = "difflineplus"
180 ltype = "difflineplus"
181 elif l.startswith('-'):
181 elif l.startswith('-'):
182 ltype = "difflineminus"
182 ltype = "difflineminus"
183 elif l.startswith('@'):
183 elif l.startswith('@'):
184 ltype = "difflineat"
184 ltype = "difflineat"
185 else:
185 else:
186 ltype = "diffline"
186 ltype = "diffline"
187 yield tmpl(ltype,
187 yield tmpl(ltype,
188 line=l,
188 line=l,
189 lineid="l%s" % lineno,
189 lineid="l%s" % lineno,
190 linenumber="% 8s" % lineno)
190 linenumber="% 8s" % lineno)
191
191
192 if files:
192 if files:
193 m = match.exact(repo.root, repo.getcwd(), files)
193 m = match.exact(repo.root, repo.getcwd(), files)
194 else:
194 else:
195 m = match.always(repo.root, repo.getcwd())
195 m = match.always(repo.root, repo.getcwd())
196
196
197 diffopts = patch.diffopts(repo.ui, untrusted=True)
197 diffopts = patch.diffopts(repo.ui, untrusted=True)
198 parents = ctx.parents()
198 parents = ctx.parents()
199 node1 = parents and parents[0].node() or nullid
199 node1 = parents and parents[0].node() or nullid
200 node2 = ctx.node()
200 node2 = ctx.node()
201
201
202 block = []
202 block = []
203 for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
203 for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
204 if chunk.startswith('diff') and block:
204 if chunk.startswith('diff') and block:
205 yield tmpl('diffblock', parity=parity.next(),
205 yield tmpl('diffblock', parity=parity.next(),
206 lines=prettyprintlines(''.join(block)))
206 lines=prettyprintlines(''.join(block)))
207 block = []
207 block = []
208 if chunk.startswith('diff') and style != 'raw':
208 if chunk.startswith('diff') and style != 'raw':
209 chunk = ''.join(chunk.splitlines(True)[1:])
209 chunk = ''.join(chunk.splitlines(True)[1:])
210 block.append(chunk)
210 block.append(chunk)
211 yield tmpl('diffblock', parity=parity.next(),
211 yield tmpl('diffblock', parity=parity.next(),
212 lines=prettyprintlines(''.join(block)))
212 lines=prettyprintlines(''.join(block)))
213
213
214 class sessionvars(object):
214 class sessionvars(object):
215 def __init__(self, vars, start='?'):
215 def __init__(self, vars, start='?'):
216 self.start = start
216 self.start = start
217 self.vars = vars
217 self.vars = vars
218 def __getitem__(self, key):
218 def __getitem__(self, key):
219 return self.vars[key]
219 return self.vars[key]
220 def __setitem__(self, key, value):
220 def __setitem__(self, key, value):
221 self.vars[key] = value
221 self.vars[key] = value
222 def __copy__(self):
222 def __copy__(self):
223 return sessionvars(copy.copy(self.vars), self.start)
223 return sessionvars(copy.copy(self.vars), self.start)
224 def __iter__(self):
224 def __iter__(self):
225 separator = self.start
225 separator = self.start
226 for key, value in self.vars.iteritems():
226 for key, value in self.vars.iteritems():
227 yield {'name': key, 'value': str(value), 'separator': separator}
227 yield {'name': key, 'value': str(value), 'separator': separator}
228 separator = '&'
228 separator = '&'
229
229
230 class wsgiui(ui.ui):
230 class wsgiui(ui.ui):
231 # default termwidth breaks under mod_wsgi
231 # default termwidth breaks under mod_wsgi
232 def termwidth(self):
232 def termwidth(self):
233 return 80
233 return 80
@@ -1,307 +1,307 b''
1 # match.py - filename matching
1 # match.py - filename matching
2 #
2 #
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import util
9 import scmutil, util
10 from i18n import _
10 from i18n import _
11
11
12 class match(object):
12 class match(object):
13 def __init__(self, root, cwd, patterns, include=[], exclude=[],
13 def __init__(self, root, cwd, patterns, include=[], exclude=[],
14 default='glob', exact=False, auditor=None):
14 default='glob', exact=False, auditor=None):
15 """build an object to match a set of file patterns
15 """build an object to match a set of file patterns
16
16
17 arguments:
17 arguments:
18 root - the canonical root of the tree you're matching against
18 root - the canonical root of the tree you're matching against
19 cwd - the current working directory, if relevant
19 cwd - the current working directory, if relevant
20 patterns - patterns to find
20 patterns - patterns to find
21 include - patterns to include
21 include - patterns to include
22 exclude - patterns to exclude
22 exclude - patterns to exclude
23 default - if a pattern in names has no explicit type, assume this one
23 default - if a pattern in names has no explicit type, assume this one
24 exact - patterns are actually literals
24 exact - patterns are actually literals
25
25
26 a pattern is one of:
26 a pattern is one of:
27 'glob:<glob>' - a glob relative to cwd
27 'glob:<glob>' - a glob relative to cwd
28 're:<regexp>' - a regular expression
28 're:<regexp>' - a regular expression
29 'path:<path>' - a path relative to canonroot
29 'path:<path>' - a path relative to canonroot
30 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
30 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
31 'relpath:<path>' - a path relative to cwd
31 'relpath:<path>' - a path relative to cwd
32 'relre:<regexp>' - a regexp that needn't match the start of a name
32 'relre:<regexp>' - a regexp that needn't match the start of a name
33 '<something>' - a pattern of the specified default type
33 '<something>' - a pattern of the specified default type
34 """
34 """
35
35
36 self._root = root
36 self._root = root
37 self._cwd = cwd
37 self._cwd = cwd
38 self._files = []
38 self._files = []
39 self._anypats = bool(include or exclude)
39 self._anypats = bool(include or exclude)
40
40
41 if include:
41 if include:
42 pats = _normalize(include, 'glob', root, cwd, auditor)
42 pats = _normalize(include, 'glob', root, cwd, auditor)
43 self.includepat, im = _buildmatch(pats, '(?:/|$)')
43 self.includepat, im = _buildmatch(pats, '(?:/|$)')
44 if exclude:
44 if exclude:
45 pats = _normalize(exclude, 'glob', root, cwd, auditor)
45 pats = _normalize(exclude, 'glob', root, cwd, auditor)
46 self.excludepat, em = _buildmatch(pats, '(?:/|$)')
46 self.excludepat, em = _buildmatch(pats, '(?:/|$)')
47 if exact:
47 if exact:
48 self._files = patterns
48 self._files = patterns
49 pm = self.exact
49 pm = self.exact
50 elif patterns:
50 elif patterns:
51 pats = _normalize(patterns, default, root, cwd, auditor)
51 pats = _normalize(patterns, default, root, cwd, auditor)
52 self._files = _roots(pats)
52 self._files = _roots(pats)
53 self._anypats = self._anypats or _anypats(pats)
53 self._anypats = self._anypats or _anypats(pats)
54 self.patternspat, pm = _buildmatch(pats, '$')
54 self.patternspat, pm = _buildmatch(pats, '$')
55
55
56 if patterns or exact:
56 if patterns or exact:
57 if include:
57 if include:
58 if exclude:
58 if exclude:
59 m = lambda f: im(f) and not em(f) and pm(f)
59 m = lambda f: im(f) and not em(f) and pm(f)
60 else:
60 else:
61 m = lambda f: im(f) and pm(f)
61 m = lambda f: im(f) and pm(f)
62 else:
62 else:
63 if exclude:
63 if exclude:
64 m = lambda f: not em(f) and pm(f)
64 m = lambda f: not em(f) and pm(f)
65 else:
65 else:
66 m = pm
66 m = pm
67 else:
67 else:
68 if include:
68 if include:
69 if exclude:
69 if exclude:
70 m = lambda f: im(f) and not em(f)
70 m = lambda f: im(f) and not em(f)
71 else:
71 else:
72 m = im
72 m = im
73 else:
73 else:
74 if exclude:
74 if exclude:
75 m = lambda f: not em(f)
75 m = lambda f: not em(f)
76 else:
76 else:
77 m = lambda f: True
77 m = lambda f: True
78
78
79 self.matchfn = m
79 self.matchfn = m
80 self._fmap = set(self._files)
80 self._fmap = set(self._files)
81
81
82 def __call__(self, fn):
82 def __call__(self, fn):
83 return self.matchfn(fn)
83 return self.matchfn(fn)
84 def __iter__(self):
84 def __iter__(self):
85 for f in self._files:
85 for f in self._files:
86 yield f
86 yield f
87 def bad(self, f, msg):
87 def bad(self, f, msg):
88 '''callback for each explicit file that can't be
88 '''callback for each explicit file that can't be
89 found/accessed, with an error message
89 found/accessed, with an error message
90 '''
90 '''
91 pass
91 pass
92 def dir(self, f):
92 def dir(self, f):
93 pass
93 pass
94 def missing(self, f):
94 def missing(self, f):
95 pass
95 pass
96 def exact(self, f):
96 def exact(self, f):
97 return f in self._fmap
97 return f in self._fmap
98 def rel(self, f):
98 def rel(self, f):
99 return util.pathto(self._root, self._cwd, f)
99 return util.pathto(self._root, self._cwd, f)
100 def files(self):
100 def files(self):
101 return self._files
101 return self._files
102 def anypats(self):
102 def anypats(self):
103 return self._anypats
103 return self._anypats
104
104
105 class exact(match):
105 class exact(match):
106 def __init__(self, root, cwd, files):
106 def __init__(self, root, cwd, files):
107 match.__init__(self, root, cwd, files, exact = True)
107 match.__init__(self, root, cwd, files, exact = True)
108
108
109 class always(match):
109 class always(match):
110 def __init__(self, root, cwd):
110 def __init__(self, root, cwd):
111 match.__init__(self, root, cwd, [])
111 match.__init__(self, root, cwd, [])
112
112
113 class narrowmatcher(match):
113 class narrowmatcher(match):
114 """Adapt a matcher to work on a subdirectory only.
114 """Adapt a matcher to work on a subdirectory only.
115
115
116 The paths are remapped to remove/insert the path as needed:
116 The paths are remapped to remove/insert the path as needed:
117
117
118 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
118 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
119 >>> m2 = narrowmatcher('sub', m1)
119 >>> m2 = narrowmatcher('sub', m1)
120 >>> bool(m2('a.txt'))
120 >>> bool(m2('a.txt'))
121 False
121 False
122 >>> bool(m2('b.txt'))
122 >>> bool(m2('b.txt'))
123 True
123 True
124 >>> bool(m2.matchfn('a.txt'))
124 >>> bool(m2.matchfn('a.txt'))
125 False
125 False
126 >>> bool(m2.matchfn('b.txt'))
126 >>> bool(m2.matchfn('b.txt'))
127 True
127 True
128 >>> m2.files()
128 >>> m2.files()
129 ['b.txt']
129 ['b.txt']
130 >>> m2.exact('b.txt')
130 >>> m2.exact('b.txt')
131 True
131 True
132 >>> m2.rel('b.txt')
132 >>> m2.rel('b.txt')
133 'b.txt'
133 'b.txt'
134 >>> def bad(f, msg):
134 >>> def bad(f, msg):
135 ... print "%s: %s" % (f, msg)
135 ... print "%s: %s" % (f, msg)
136 >>> m1.bad = bad
136 >>> m1.bad = bad
137 >>> m2.bad('x.txt', 'No such file')
137 >>> m2.bad('x.txt', 'No such file')
138 sub/x.txt: No such file
138 sub/x.txt: No such file
139 """
139 """
140
140
141 def __init__(self, path, matcher):
141 def __init__(self, path, matcher):
142 self._root = matcher._root
142 self._root = matcher._root
143 self._cwd = matcher._cwd
143 self._cwd = matcher._cwd
144 self._path = path
144 self._path = path
145 self._matcher = matcher
145 self._matcher = matcher
146
146
147 self._files = [f[len(path) + 1:] for f in matcher._files
147 self._files = [f[len(path) + 1:] for f in matcher._files
148 if f.startswith(path + "/")]
148 if f.startswith(path + "/")]
149 self._anypats = matcher._anypats
149 self._anypats = matcher._anypats
150 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
150 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
151 self._fmap = set(self._files)
151 self._fmap = set(self._files)
152
152
153 def bad(self, f, msg):
153 def bad(self, f, msg):
154 self._matcher.bad(self._path + "/" + f, msg)
154 self._matcher.bad(self._path + "/" + f, msg)
155
155
156 def patkind(pat):
156 def patkind(pat):
157 return _patsplit(pat, None)[0]
157 return _patsplit(pat, None)[0]
158
158
159 def _patsplit(pat, default):
159 def _patsplit(pat, default):
160 """Split a string into an optional pattern kind prefix and the
160 """Split a string into an optional pattern kind prefix and the
161 actual pattern."""
161 actual pattern."""
162 if ':' in pat:
162 if ':' in pat:
163 kind, val = pat.split(':', 1)
163 kind, val = pat.split(':', 1)
164 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
164 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
165 'listfile', 'listfile0'):
165 'listfile', 'listfile0'):
166 return kind, val
166 return kind, val
167 return default, pat
167 return default, pat
168
168
169 def _globre(pat):
169 def _globre(pat):
170 "convert a glob pattern into a regexp"
170 "convert a glob pattern into a regexp"
171 i, n = 0, len(pat)
171 i, n = 0, len(pat)
172 res = ''
172 res = ''
173 group = 0
173 group = 0
174 escape = re.escape
174 escape = re.escape
175 def peek():
175 def peek():
176 return i < n and pat[i]
176 return i < n and pat[i]
177 while i < n:
177 while i < n:
178 c = pat[i]
178 c = pat[i]
179 i += 1
179 i += 1
180 if c not in '*?[{},\\':
180 if c not in '*?[{},\\':
181 res += escape(c)
181 res += escape(c)
182 elif c == '*':
182 elif c == '*':
183 if peek() == '*':
183 if peek() == '*':
184 i += 1
184 i += 1
185 res += '.*'
185 res += '.*'
186 else:
186 else:
187 res += '[^/]*'
187 res += '[^/]*'
188 elif c == '?':
188 elif c == '?':
189 res += '.'
189 res += '.'
190 elif c == '[':
190 elif c == '[':
191 j = i
191 j = i
192 if j < n and pat[j] in '!]':
192 if j < n and pat[j] in '!]':
193 j += 1
193 j += 1
194 while j < n and pat[j] != ']':
194 while j < n and pat[j] != ']':
195 j += 1
195 j += 1
196 if j >= n:
196 if j >= n:
197 res += '\\['
197 res += '\\['
198 else:
198 else:
199 stuff = pat[i:j].replace('\\','\\\\')
199 stuff = pat[i:j].replace('\\','\\\\')
200 i = j + 1
200 i = j + 1
201 if stuff[0] == '!':
201 if stuff[0] == '!':
202 stuff = '^' + stuff[1:]
202 stuff = '^' + stuff[1:]
203 elif stuff[0] == '^':
203 elif stuff[0] == '^':
204 stuff = '\\' + stuff
204 stuff = '\\' + stuff
205 res = '%s[%s]' % (res, stuff)
205 res = '%s[%s]' % (res, stuff)
206 elif c == '{':
206 elif c == '{':
207 group += 1
207 group += 1
208 res += '(?:'
208 res += '(?:'
209 elif c == '}' and group:
209 elif c == '}' and group:
210 res += ')'
210 res += ')'
211 group -= 1
211 group -= 1
212 elif c == ',' and group:
212 elif c == ',' and group:
213 res += '|'
213 res += '|'
214 elif c == '\\':
214 elif c == '\\':
215 p = peek()
215 p = peek()
216 if p:
216 if p:
217 i += 1
217 i += 1
218 res += escape(p)
218 res += escape(p)
219 else:
219 else:
220 res += escape(c)
220 res += escape(c)
221 else:
221 else:
222 res += escape(c)
222 res += escape(c)
223 return res
223 return res
224
224
225 def _regex(kind, name, tail):
225 def _regex(kind, name, tail):
226 '''convert a pattern into a regular expression'''
226 '''convert a pattern into a regular expression'''
227 if not name:
227 if not name:
228 return ''
228 return ''
229 if kind == 're':
229 if kind == 're':
230 return name
230 return name
231 elif kind == 'path':
231 elif kind == 'path':
232 return '^' + re.escape(name) + '(?:/|$)'
232 return '^' + re.escape(name) + '(?:/|$)'
233 elif kind == 'relglob':
233 elif kind == 'relglob':
234 return '(?:|.*/)' + _globre(name) + tail
234 return '(?:|.*/)' + _globre(name) + tail
235 elif kind == 'relpath':
235 elif kind == 'relpath':
236 return re.escape(name) + '(?:/|$)'
236 return re.escape(name) + '(?:/|$)'
237 elif kind == 'relre':
237 elif kind == 'relre':
238 if name.startswith('^'):
238 if name.startswith('^'):
239 return name
239 return name
240 return '.*' + name
240 return '.*' + name
241 return _globre(name) + tail
241 return _globre(name) + tail
242
242
243 def _buildmatch(pats, tail):
243 def _buildmatch(pats, tail):
244 """build a matching function from a set of patterns"""
244 """build a matching function from a set of patterns"""
245 try:
245 try:
246 pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
246 pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
247 if len(pat) > 20000:
247 if len(pat) > 20000:
248 raise OverflowError()
248 raise OverflowError()
249 return pat, re.compile(pat).match
249 return pat, re.compile(pat).match
250 except OverflowError:
250 except OverflowError:
251 # We're using a Python with a tiny regex engine and we
251 # We're using a Python with a tiny regex engine and we
252 # made it explode, so we'll divide the pattern list in two
252 # made it explode, so we'll divide the pattern list in two
253 # until it works
253 # until it works
254 l = len(pats)
254 l = len(pats)
255 if l < 2:
255 if l < 2:
256 raise
256 raise
257 pata, a = _buildmatch(pats[:l//2], tail)
257 pata, a = _buildmatch(pats[:l//2], tail)
258 patb, b = _buildmatch(pats[l//2:], tail)
258 patb, b = _buildmatch(pats[l//2:], tail)
259 return pat, lambda s: a(s) or b(s)
259 return pat, lambda s: a(s) or b(s)
260 except re.error:
260 except re.error:
261 for k, p in pats:
261 for k, p in pats:
262 try:
262 try:
263 re.compile('(?:%s)' % _regex(k, p, tail))
263 re.compile('(?:%s)' % _regex(k, p, tail))
264 except re.error:
264 except re.error:
265 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
265 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
266 raise util.Abort(_("invalid pattern"))
266 raise util.Abort(_("invalid pattern"))
267
267
268 def _normalize(names, default, root, cwd, auditor):
268 def _normalize(names, default, root, cwd, auditor):
269 pats = []
269 pats = []
270 for kind, name in [_patsplit(p, default) for p in names]:
270 for kind, name in [_patsplit(p, default) for p in names]:
271 if kind in ('glob', 'relpath'):
271 if kind in ('glob', 'relpath'):
272 name = util.canonpath(root, cwd, name, auditor)
272 name = scmutil.canonpath(root, cwd, name, auditor)
273 elif kind in ('relglob', 'path'):
273 elif kind in ('relglob', 'path'):
274 name = util.normpath(name)
274 name = util.normpath(name)
275 elif kind in ('listfile', 'listfile0'):
275 elif kind in ('listfile', 'listfile0'):
276 delimiter = kind == 'listfile0' and '\0' or '\n'
276 delimiter = kind == 'listfile0' and '\0' or '\n'
277 try:
277 try:
278 files = open(name, 'r').read().split(delimiter)
278 files = open(name, 'r').read().split(delimiter)
279 files = [f for f in files if f]
279 files = [f for f in files if f]
280 except EnvironmentError:
280 except EnvironmentError:
281 raise util.Abort(_("unable to read file list (%s)") % name)
281 raise util.Abort(_("unable to read file list (%s)") % name)
282 pats += _normalize(files, default, root, cwd, auditor)
282 pats += _normalize(files, default, root, cwd, auditor)
283 continue
283 continue
284
284
285 pats.append((kind, name))
285 pats.append((kind, name))
286 return pats
286 return pats
287
287
288 def _roots(patterns):
288 def _roots(patterns):
289 r = []
289 r = []
290 for kind, name in patterns:
290 for kind, name in patterns:
291 if kind == 'glob': # find the non-glob prefix
291 if kind == 'glob': # find the non-glob prefix
292 root = []
292 root = []
293 for p in name.split('/'):
293 for p in name.split('/'):
294 if '[' in p or '{' in p or '*' in p or '?' in p:
294 if '[' in p or '{' in p or '*' in p or '?' in p:
295 break
295 break
296 root.append(p)
296 root.append(p)
297 r.append('/'.join(root) or '.')
297 r.append('/'.join(root) or '.')
298 elif kind in ('relpath', 'path'):
298 elif kind in ('relpath', 'path'):
299 r.append(name or '.')
299 r.append(name or '.')
300 elif kind == 'relglob':
300 elif kind == 'relglob':
301 r.append('.')
301 r.append('.')
302 return r
302 return r
303
303
304 def _anypats(patterns):
304 def _anypats(patterns):
305 for kind, name in patterns:
305 for kind, name in patterns:
306 if kind in ('glob', 're', 'relglob', 'relre'):
306 if kind in ('glob', 're', 'relglob', 'relre'):
307 return True
307 return True
@@ -1,1617 +1,1618 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib
10 import tempfile, zlib
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15
15
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17
17
18 class PatchError(Exception):
18 class PatchError(Exception):
19 pass
19 pass
20
20
21 # helper functions
21 # helper functions
22
22
23 def copyfile(src, dst, basedir):
23 def copyfile(src, dst, basedir):
24 abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
24 abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
25 for x in [src, dst]]
25 if os.path.lexists(absdst):
26 if os.path.lexists(absdst):
26 raise util.Abort(_("cannot create %s: destination already exists") %
27 raise util.Abort(_("cannot create %s: destination already exists") %
27 dst)
28 dst)
28
29
29 dstdir = os.path.dirname(absdst)
30 dstdir = os.path.dirname(absdst)
30 if dstdir and not os.path.isdir(dstdir):
31 if dstdir and not os.path.isdir(dstdir):
31 try:
32 try:
32 os.makedirs(dstdir)
33 os.makedirs(dstdir)
33 except IOError:
34 except IOError:
34 raise util.Abort(
35 raise util.Abort(
35 _("cannot create %s: unable to create destination directory")
36 _("cannot create %s: unable to create destination directory")
36 % dst)
37 % dst)
37
38
38 util.copyfile(abssrc, absdst)
39 util.copyfile(abssrc, absdst)
39
40
40 # public functions
41 # public functions
41
42
42 def split(stream):
43 def split(stream):
43 '''return an iterator of individual patches from a stream'''
44 '''return an iterator of individual patches from a stream'''
44 def isheader(line, inheader):
45 def isheader(line, inheader):
45 if inheader and line[0] in (' ', '\t'):
46 if inheader and line[0] in (' ', '\t'):
46 # continuation
47 # continuation
47 return True
48 return True
48 if line[0] in (' ', '-', '+'):
49 if line[0] in (' ', '-', '+'):
49 # diff line - don't check for header pattern in there
50 # diff line - don't check for header pattern in there
50 return False
51 return False
51 l = line.split(': ', 1)
52 l = line.split(': ', 1)
52 return len(l) == 2 and ' ' not in l[0]
53 return len(l) == 2 and ' ' not in l[0]
53
54
54 def chunk(lines):
55 def chunk(lines):
55 return cStringIO.StringIO(''.join(lines))
56 return cStringIO.StringIO(''.join(lines))
56
57
57 def hgsplit(stream, cur):
58 def hgsplit(stream, cur):
58 inheader = True
59 inheader = True
59
60
60 for line in stream:
61 for line in stream:
61 if not line.strip():
62 if not line.strip():
62 inheader = False
63 inheader = False
63 if not inheader and line.startswith('# HG changeset patch'):
64 if not inheader and line.startswith('# HG changeset patch'):
64 yield chunk(cur)
65 yield chunk(cur)
65 cur = []
66 cur = []
66 inheader = True
67 inheader = True
67
68
68 cur.append(line)
69 cur.append(line)
69
70
70 if cur:
71 if cur:
71 yield chunk(cur)
72 yield chunk(cur)
72
73
73 def mboxsplit(stream, cur):
74 def mboxsplit(stream, cur):
74 for line in stream:
75 for line in stream:
75 if line.startswith('From '):
76 if line.startswith('From '):
76 for c in split(chunk(cur[1:])):
77 for c in split(chunk(cur[1:])):
77 yield c
78 yield c
78 cur = []
79 cur = []
79
80
80 cur.append(line)
81 cur.append(line)
81
82
82 if cur:
83 if cur:
83 for c in split(chunk(cur[1:])):
84 for c in split(chunk(cur[1:])):
84 yield c
85 yield c
85
86
86 def mimesplit(stream, cur):
87 def mimesplit(stream, cur):
87 def msgfp(m):
88 def msgfp(m):
88 fp = cStringIO.StringIO()
89 fp = cStringIO.StringIO()
89 g = email.Generator.Generator(fp, mangle_from_=False)
90 g = email.Generator.Generator(fp, mangle_from_=False)
90 g.flatten(m)
91 g.flatten(m)
91 fp.seek(0)
92 fp.seek(0)
92 return fp
93 return fp
93
94
94 for line in stream:
95 for line in stream:
95 cur.append(line)
96 cur.append(line)
96 c = chunk(cur)
97 c = chunk(cur)
97
98
98 m = email.Parser.Parser().parse(c)
99 m = email.Parser.Parser().parse(c)
99 if not m.is_multipart():
100 if not m.is_multipart():
100 yield msgfp(m)
101 yield msgfp(m)
101 else:
102 else:
102 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
103 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
103 for part in m.walk():
104 for part in m.walk():
104 ct = part.get_content_type()
105 ct = part.get_content_type()
105 if ct not in ok_types:
106 if ct not in ok_types:
106 continue
107 continue
107 yield msgfp(part)
108 yield msgfp(part)
108
109
109 def headersplit(stream, cur):
110 def headersplit(stream, cur):
110 inheader = False
111 inheader = False
111
112
112 for line in stream:
113 for line in stream:
113 if not inheader and isheader(line, inheader):
114 if not inheader and isheader(line, inheader):
114 yield chunk(cur)
115 yield chunk(cur)
115 cur = []
116 cur = []
116 inheader = True
117 inheader = True
117 if inheader and not isheader(line, inheader):
118 if inheader and not isheader(line, inheader):
118 inheader = False
119 inheader = False
119
120
120 cur.append(line)
121 cur.append(line)
121
122
122 if cur:
123 if cur:
123 yield chunk(cur)
124 yield chunk(cur)
124
125
125 def remainder(cur):
126 def remainder(cur):
126 yield chunk(cur)
127 yield chunk(cur)
127
128
128 class fiter(object):
129 class fiter(object):
129 def __init__(self, fp):
130 def __init__(self, fp):
130 self.fp = fp
131 self.fp = fp
131
132
132 def __iter__(self):
133 def __iter__(self):
133 return self
134 return self
134
135
135 def next(self):
136 def next(self):
136 l = self.fp.readline()
137 l = self.fp.readline()
137 if not l:
138 if not l:
138 raise StopIteration
139 raise StopIteration
139 return l
140 return l
140
141
141 inheader = False
142 inheader = False
142 cur = []
143 cur = []
143
144
144 mimeheaders = ['content-type']
145 mimeheaders = ['content-type']
145
146
146 if not hasattr(stream, 'next'):
147 if not hasattr(stream, 'next'):
147 # http responses, for example, have readline but not next
148 # http responses, for example, have readline but not next
148 stream = fiter(stream)
149 stream = fiter(stream)
149
150
150 for line in stream:
151 for line in stream:
151 cur.append(line)
152 cur.append(line)
152 if line.startswith('# HG changeset patch'):
153 if line.startswith('# HG changeset patch'):
153 return hgsplit(stream, cur)
154 return hgsplit(stream, cur)
154 elif line.startswith('From '):
155 elif line.startswith('From '):
155 return mboxsplit(stream, cur)
156 return mboxsplit(stream, cur)
156 elif isheader(line, inheader):
157 elif isheader(line, inheader):
157 inheader = True
158 inheader = True
158 if line.split(':', 1)[0].lower() in mimeheaders:
159 if line.split(':', 1)[0].lower() in mimeheaders:
159 # let email parser handle this
160 # let email parser handle this
160 return mimesplit(stream, cur)
161 return mimesplit(stream, cur)
161 elif line.startswith('--- ') and inheader:
162 elif line.startswith('--- ') and inheader:
162 # No evil headers seen by diff start, split by hand
163 # No evil headers seen by diff start, split by hand
163 return headersplit(stream, cur)
164 return headersplit(stream, cur)
164 # Not enough info, keep reading
165 # Not enough info, keep reading
165
166
166 # if we are here, we have a very plain patch
167 # if we are here, we have a very plain patch
167 return remainder(cur)
168 return remainder(cur)
168
169
169 def extract(ui, fileobj):
170 def extract(ui, fileobj):
170 '''extract patch from data read from fileobj.
171 '''extract patch from data read from fileobj.
171
172
172 patch can be a normal patch or contained in an email message.
173 patch can be a normal patch or contained in an email message.
173
174
174 return tuple (filename, message, user, date, branch, node, p1, p2).
175 return tuple (filename, message, user, date, branch, node, p1, p2).
175 Any item in the returned tuple can be None. If filename is None,
176 Any item in the returned tuple can be None. If filename is None,
176 fileobj did not contain a patch. Caller must unlink filename when done.'''
177 fileobj did not contain a patch. Caller must unlink filename when done.'''
177
178
178 # attempt to detect the start of a patch
179 # attempt to detect the start of a patch
179 # (this heuristic is borrowed from quilt)
180 # (this heuristic is borrowed from quilt)
180 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
181 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
181 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
182 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
182 r'---[ \t].*?^\+\+\+[ \t]|'
183 r'---[ \t].*?^\+\+\+[ \t]|'
183 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
184 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
184
185
185 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
186 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
186 tmpfp = os.fdopen(fd, 'w')
187 tmpfp = os.fdopen(fd, 'w')
187 try:
188 try:
188 msg = email.Parser.Parser().parse(fileobj)
189 msg = email.Parser.Parser().parse(fileobj)
189
190
190 subject = msg['Subject']
191 subject = msg['Subject']
191 user = msg['From']
192 user = msg['From']
192 if not subject and not user:
193 if not subject and not user:
193 # Not an email, restore parsed headers if any
194 # Not an email, restore parsed headers if any
194 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
195 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
195
196
196 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
197 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
197 # should try to parse msg['Date']
198 # should try to parse msg['Date']
198 date = None
199 date = None
199 nodeid = None
200 nodeid = None
200 branch = None
201 branch = None
201 parents = []
202 parents = []
202
203
203 if subject:
204 if subject:
204 if subject.startswith('[PATCH'):
205 if subject.startswith('[PATCH'):
205 pend = subject.find(']')
206 pend = subject.find(']')
206 if pend >= 0:
207 if pend >= 0:
207 subject = subject[pend + 1:].lstrip()
208 subject = subject[pend + 1:].lstrip()
208 subject = subject.replace('\n\t', ' ')
209 subject = subject.replace('\n\t', ' ')
209 ui.debug('Subject: %s\n' % subject)
210 ui.debug('Subject: %s\n' % subject)
210 if user:
211 if user:
211 ui.debug('From: %s\n' % user)
212 ui.debug('From: %s\n' % user)
212 diffs_seen = 0
213 diffs_seen = 0
213 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
214 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
214 message = ''
215 message = ''
215 for part in msg.walk():
216 for part in msg.walk():
216 content_type = part.get_content_type()
217 content_type = part.get_content_type()
217 ui.debug('Content-Type: %s\n' % content_type)
218 ui.debug('Content-Type: %s\n' % content_type)
218 if content_type not in ok_types:
219 if content_type not in ok_types:
219 continue
220 continue
220 payload = part.get_payload(decode=True)
221 payload = part.get_payload(decode=True)
221 m = diffre.search(payload)
222 m = diffre.search(payload)
222 if m:
223 if m:
223 hgpatch = False
224 hgpatch = False
224 hgpatchheader = False
225 hgpatchheader = False
225 ignoretext = False
226 ignoretext = False
226
227
227 ui.debug('found patch at byte %d\n' % m.start(0))
228 ui.debug('found patch at byte %d\n' % m.start(0))
228 diffs_seen += 1
229 diffs_seen += 1
229 cfp = cStringIO.StringIO()
230 cfp = cStringIO.StringIO()
230 for line in payload[:m.start(0)].splitlines():
231 for line in payload[:m.start(0)].splitlines():
231 if line.startswith('# HG changeset patch') and not hgpatch:
232 if line.startswith('# HG changeset patch') and not hgpatch:
232 ui.debug('patch generated by hg export\n')
233 ui.debug('patch generated by hg export\n')
233 hgpatch = True
234 hgpatch = True
234 hgpatchheader = True
235 hgpatchheader = True
235 # drop earlier commit message content
236 # drop earlier commit message content
236 cfp.seek(0)
237 cfp.seek(0)
237 cfp.truncate()
238 cfp.truncate()
238 subject = None
239 subject = None
239 elif hgpatchheader:
240 elif hgpatchheader:
240 if line.startswith('# User '):
241 if line.startswith('# User '):
241 user = line[7:]
242 user = line[7:]
242 ui.debug('From: %s\n' % user)
243 ui.debug('From: %s\n' % user)
243 elif line.startswith("# Date "):
244 elif line.startswith("# Date "):
244 date = line[7:]
245 date = line[7:]
245 elif line.startswith("# Branch "):
246 elif line.startswith("# Branch "):
246 branch = line[9:]
247 branch = line[9:]
247 elif line.startswith("# Node ID "):
248 elif line.startswith("# Node ID "):
248 nodeid = line[10:]
249 nodeid = line[10:]
249 elif line.startswith("# Parent "):
250 elif line.startswith("# Parent "):
250 parents.append(line[10:])
251 parents.append(line[10:])
251 elif not line.startswith("# "):
252 elif not line.startswith("# "):
252 hgpatchheader = False
253 hgpatchheader = False
253 elif line == '---' and gitsendmail:
254 elif line == '---' and gitsendmail:
254 ignoretext = True
255 ignoretext = True
255 if not hgpatchheader and not ignoretext:
256 if not hgpatchheader and not ignoretext:
256 cfp.write(line)
257 cfp.write(line)
257 cfp.write('\n')
258 cfp.write('\n')
258 message = cfp.getvalue()
259 message = cfp.getvalue()
259 if tmpfp:
260 if tmpfp:
260 tmpfp.write(payload)
261 tmpfp.write(payload)
261 if not payload.endswith('\n'):
262 if not payload.endswith('\n'):
262 tmpfp.write('\n')
263 tmpfp.write('\n')
263 elif not diffs_seen and message and content_type == 'text/plain':
264 elif not diffs_seen and message and content_type == 'text/plain':
264 message += '\n' + payload
265 message += '\n' + payload
265 except:
266 except:
266 tmpfp.close()
267 tmpfp.close()
267 os.unlink(tmpname)
268 os.unlink(tmpname)
268 raise
269 raise
269
270
270 if subject and not message.startswith(subject):
271 if subject and not message.startswith(subject):
271 message = '%s\n%s' % (subject, message)
272 message = '%s\n%s' % (subject, message)
272 tmpfp.close()
273 tmpfp.close()
273 if not diffs_seen:
274 if not diffs_seen:
274 os.unlink(tmpname)
275 os.unlink(tmpname)
275 return None, message, user, date, branch, None, None, None
276 return None, message, user, date, branch, None, None, None
276 p1 = parents and parents.pop(0) or None
277 p1 = parents and parents.pop(0) or None
277 p2 = parents and parents.pop(0) or None
278 p2 = parents and parents.pop(0) or None
278 return tmpname, message, user, date, branch, nodeid, p1, p2
279 return tmpname, message, user, date, branch, nodeid, p1, p2
279
280
280 class patchmeta(object):
281 class patchmeta(object):
281 """Patched file metadata
282 """Patched file metadata
282
283
283 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
284 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
284 or COPY. 'path' is patched file path. 'oldpath' is set to the
285 or COPY. 'path' is patched file path. 'oldpath' is set to the
285 origin file when 'op' is either COPY or RENAME, None otherwise. If
286 origin file when 'op' is either COPY or RENAME, None otherwise. If
286 file mode is changed, 'mode' is a tuple (islink, isexec) where
287 file mode is changed, 'mode' is a tuple (islink, isexec) where
287 'islink' is True if the file is a symlink and 'isexec' is True if
288 'islink' is True if the file is a symlink and 'isexec' is True if
288 the file is executable. Otherwise, 'mode' is None.
289 the file is executable. Otherwise, 'mode' is None.
289 """
290 """
290 def __init__(self, path):
291 def __init__(self, path):
291 self.path = path
292 self.path = path
292 self.oldpath = None
293 self.oldpath = None
293 self.mode = None
294 self.mode = None
294 self.op = 'MODIFY'
295 self.op = 'MODIFY'
295 self.binary = False
296 self.binary = False
296
297
297 def setmode(self, mode):
298 def setmode(self, mode):
298 islink = mode & 020000
299 islink = mode & 020000
299 isexec = mode & 0100
300 isexec = mode & 0100
300 self.mode = (islink, isexec)
301 self.mode = (islink, isexec)
301
302
302 def __repr__(self):
303 def __repr__(self):
303 return "<patchmeta %s %r>" % (self.op, self.path)
304 return "<patchmeta %s %r>" % (self.op, self.path)
304
305
305 def readgitpatch(lr):
306 def readgitpatch(lr):
306 """extract git-style metadata about patches from <patchname>"""
307 """extract git-style metadata about patches from <patchname>"""
307
308
308 # Filter patch for git information
309 # Filter patch for git information
309 gp = None
310 gp = None
310 gitpatches = []
311 gitpatches = []
311 for line in lr:
312 for line in lr:
312 line = line.rstrip(' \r\n')
313 line = line.rstrip(' \r\n')
313 if line.startswith('diff --git'):
314 if line.startswith('diff --git'):
314 m = gitre.match(line)
315 m = gitre.match(line)
315 if m:
316 if m:
316 if gp:
317 if gp:
317 gitpatches.append(gp)
318 gitpatches.append(gp)
318 dst = m.group(2)
319 dst = m.group(2)
319 gp = patchmeta(dst)
320 gp = patchmeta(dst)
320 elif gp:
321 elif gp:
321 if line.startswith('--- '):
322 if line.startswith('--- '):
322 gitpatches.append(gp)
323 gitpatches.append(gp)
323 gp = None
324 gp = None
324 continue
325 continue
325 if line.startswith('rename from '):
326 if line.startswith('rename from '):
326 gp.op = 'RENAME'
327 gp.op = 'RENAME'
327 gp.oldpath = line[12:]
328 gp.oldpath = line[12:]
328 elif line.startswith('rename to '):
329 elif line.startswith('rename to '):
329 gp.path = line[10:]
330 gp.path = line[10:]
330 elif line.startswith('copy from '):
331 elif line.startswith('copy from '):
331 gp.op = 'COPY'
332 gp.op = 'COPY'
332 gp.oldpath = line[10:]
333 gp.oldpath = line[10:]
333 elif line.startswith('copy to '):
334 elif line.startswith('copy to '):
334 gp.path = line[8:]
335 gp.path = line[8:]
335 elif line.startswith('deleted file'):
336 elif line.startswith('deleted file'):
336 gp.op = 'DELETE'
337 gp.op = 'DELETE'
337 elif line.startswith('new file mode '):
338 elif line.startswith('new file mode '):
338 gp.op = 'ADD'
339 gp.op = 'ADD'
339 gp.setmode(int(line[-6:], 8))
340 gp.setmode(int(line[-6:], 8))
340 elif line.startswith('new mode '):
341 elif line.startswith('new mode '):
341 gp.setmode(int(line[-6:], 8))
342 gp.setmode(int(line[-6:], 8))
342 elif line.startswith('GIT binary patch'):
343 elif line.startswith('GIT binary patch'):
343 gp.binary = True
344 gp.binary = True
344 if gp:
345 if gp:
345 gitpatches.append(gp)
346 gitpatches.append(gp)
346
347
347 return gitpatches
348 return gitpatches
348
349
349 class linereader(object):
350 class linereader(object):
350 # simple class to allow pushing lines back into the input stream
351 # simple class to allow pushing lines back into the input stream
351 def __init__(self, fp, textmode=False):
352 def __init__(self, fp, textmode=False):
352 self.fp = fp
353 self.fp = fp
353 self.buf = []
354 self.buf = []
354 self.textmode = textmode
355 self.textmode = textmode
355 self.eol = None
356 self.eol = None
356
357
357 def push(self, line):
358 def push(self, line):
358 if line is not None:
359 if line is not None:
359 self.buf.append(line)
360 self.buf.append(line)
360
361
361 def readline(self):
362 def readline(self):
362 if self.buf:
363 if self.buf:
363 l = self.buf[0]
364 l = self.buf[0]
364 del self.buf[0]
365 del self.buf[0]
365 return l
366 return l
366 l = self.fp.readline()
367 l = self.fp.readline()
367 if not self.eol:
368 if not self.eol:
368 if l.endswith('\r\n'):
369 if l.endswith('\r\n'):
369 self.eol = '\r\n'
370 self.eol = '\r\n'
370 elif l.endswith('\n'):
371 elif l.endswith('\n'):
371 self.eol = '\n'
372 self.eol = '\n'
372 if self.textmode and l.endswith('\r\n'):
373 if self.textmode and l.endswith('\r\n'):
373 l = l[:-2] + '\n'
374 l = l[:-2] + '\n'
374 return l
375 return l
375
376
376 def __iter__(self):
377 def __iter__(self):
377 while 1:
378 while 1:
378 l = self.readline()
379 l = self.readline()
379 if not l:
380 if not l:
380 break
381 break
381 yield l
382 yield l
382
383
383 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
384 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
384 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
385 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
385 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
386 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
386 eolmodes = ['strict', 'crlf', 'lf', 'auto']
387 eolmodes = ['strict', 'crlf', 'lf', 'auto']
387
388
388 class patchfile(object):
389 class patchfile(object):
389 def __init__(self, ui, fname, opener, missing=False, eolmode='strict'):
390 def __init__(self, ui, fname, opener, missing=False, eolmode='strict'):
390 self.fname = fname
391 self.fname = fname
391 self.eolmode = eolmode
392 self.eolmode = eolmode
392 self.eol = None
393 self.eol = None
393 self.opener = opener
394 self.opener = opener
394 self.ui = ui
395 self.ui = ui
395 self.lines = []
396 self.lines = []
396 self.exists = False
397 self.exists = False
397 self.missing = missing
398 self.missing = missing
398 if not missing:
399 if not missing:
399 try:
400 try:
400 self.lines = self.readlines(fname)
401 self.lines = self.readlines(fname)
401 self.exists = True
402 self.exists = True
402 except IOError:
403 except IOError:
403 pass
404 pass
404 else:
405 else:
405 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
406 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
406
407
407 self.hash = {}
408 self.hash = {}
408 self.dirty = 0
409 self.dirty = 0
409 self.offset = 0
410 self.offset = 0
410 self.skew = 0
411 self.skew = 0
411 self.rej = []
412 self.rej = []
412 self.fileprinted = False
413 self.fileprinted = False
413 self.printfile(False)
414 self.printfile(False)
414 self.hunks = 0
415 self.hunks = 0
415
416
416 def readlines(self, fname):
417 def readlines(self, fname):
417 if os.path.islink(fname):
418 if os.path.islink(fname):
418 return [os.readlink(fname)]
419 return [os.readlink(fname)]
419 fp = self.opener(fname, 'r')
420 fp = self.opener(fname, 'r')
420 try:
421 try:
421 lr = linereader(fp, self.eolmode != 'strict')
422 lr = linereader(fp, self.eolmode != 'strict')
422 lines = list(lr)
423 lines = list(lr)
423 self.eol = lr.eol
424 self.eol = lr.eol
424 return lines
425 return lines
425 finally:
426 finally:
426 fp.close()
427 fp.close()
427
428
428 def writelines(self, fname, lines):
429 def writelines(self, fname, lines):
429 # Ensure supplied data ends in fname, being a regular file or
430 # Ensure supplied data ends in fname, being a regular file or
430 # a symlink. cmdutil.updatedir will -too magically- take care
431 # a symlink. cmdutil.updatedir will -too magically- take care
431 # of setting it to the proper type afterwards.
432 # of setting it to the proper type afterwards.
432 st_mode = None
433 st_mode = None
433 islink = os.path.islink(fname)
434 islink = os.path.islink(fname)
434 if islink:
435 if islink:
435 fp = cStringIO.StringIO()
436 fp = cStringIO.StringIO()
436 else:
437 else:
437 try:
438 try:
438 st_mode = os.lstat(fname).st_mode & 0777
439 st_mode = os.lstat(fname).st_mode & 0777
439 except OSError, e:
440 except OSError, e:
440 if e.errno != errno.ENOENT:
441 if e.errno != errno.ENOENT:
441 raise
442 raise
442 fp = self.opener(fname, 'w')
443 fp = self.opener(fname, 'w')
443 try:
444 try:
444 if self.eolmode == 'auto':
445 if self.eolmode == 'auto':
445 eol = self.eol
446 eol = self.eol
446 elif self.eolmode == 'crlf':
447 elif self.eolmode == 'crlf':
447 eol = '\r\n'
448 eol = '\r\n'
448 else:
449 else:
449 eol = '\n'
450 eol = '\n'
450
451
451 if self.eolmode != 'strict' and eol and eol != '\n':
452 if self.eolmode != 'strict' and eol and eol != '\n':
452 for l in lines:
453 for l in lines:
453 if l and l[-1] == '\n':
454 if l and l[-1] == '\n':
454 l = l[:-1] + eol
455 l = l[:-1] + eol
455 fp.write(l)
456 fp.write(l)
456 else:
457 else:
457 fp.writelines(lines)
458 fp.writelines(lines)
458 if islink:
459 if islink:
459 self.opener.symlink(fp.getvalue(), fname)
460 self.opener.symlink(fp.getvalue(), fname)
460 if st_mode is not None:
461 if st_mode is not None:
461 os.chmod(fname, st_mode)
462 os.chmod(fname, st_mode)
462 finally:
463 finally:
463 fp.close()
464 fp.close()
464
465
465 def unlink(self, fname):
466 def unlink(self, fname):
466 os.unlink(fname)
467 os.unlink(fname)
467
468
468 def printfile(self, warn):
469 def printfile(self, warn):
469 if self.fileprinted:
470 if self.fileprinted:
470 return
471 return
471 if warn or self.ui.verbose:
472 if warn or self.ui.verbose:
472 self.fileprinted = True
473 self.fileprinted = True
473 s = _("patching file %s\n") % self.fname
474 s = _("patching file %s\n") % self.fname
474 if warn:
475 if warn:
475 self.ui.warn(s)
476 self.ui.warn(s)
476 else:
477 else:
477 self.ui.note(s)
478 self.ui.note(s)
478
479
479
480
480 def findlines(self, l, linenum):
481 def findlines(self, l, linenum):
481 # looks through the hash and finds candidate lines. The
482 # looks through the hash and finds candidate lines. The
482 # result is a list of line numbers sorted based on distance
483 # result is a list of line numbers sorted based on distance
483 # from linenum
484 # from linenum
484
485
485 cand = self.hash.get(l, [])
486 cand = self.hash.get(l, [])
486 if len(cand) > 1:
487 if len(cand) > 1:
487 # resort our list of potentials forward then back.
488 # resort our list of potentials forward then back.
488 cand.sort(key=lambda x: abs(x - linenum))
489 cand.sort(key=lambda x: abs(x - linenum))
489 return cand
490 return cand
490
491
491 def makerejlines(self, fname):
492 def makerejlines(self, fname):
492 base = os.path.basename(fname)
493 base = os.path.basename(fname)
493 yield "--- %s\n+++ %s\n" % (base, base)
494 yield "--- %s\n+++ %s\n" % (base, base)
494 for x in self.rej:
495 for x in self.rej:
495 for l in x.hunk:
496 for l in x.hunk:
496 yield l
497 yield l
497 if l[-1] != '\n':
498 if l[-1] != '\n':
498 yield "\n\ No newline at end of file\n"
499 yield "\n\ No newline at end of file\n"
499
500
500 def write_rej(self):
501 def write_rej(self):
501 # our rejects are a little different from patch(1). This always
502 # our rejects are a little different from patch(1). This always
502 # creates rejects in the same form as the original patch. A file
503 # creates rejects in the same form as the original patch. A file
503 # header is inserted so that you can run the reject through patch again
504 # header is inserted so that you can run the reject through patch again
504 # without having to type the filename.
505 # without having to type the filename.
505
506
506 if not self.rej:
507 if not self.rej:
507 return
508 return
508
509
509 fname = self.fname + ".rej"
510 fname = self.fname + ".rej"
510 self.ui.warn(
511 self.ui.warn(
511 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
512 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
512 (len(self.rej), self.hunks, fname))
513 (len(self.rej), self.hunks, fname))
513
514
514 fp = self.opener(fname, 'w')
515 fp = self.opener(fname, 'w')
515 fp.writelines(self.makerejlines(self.fname))
516 fp.writelines(self.makerejlines(self.fname))
516 fp.close()
517 fp.close()
517
518
518 def apply(self, h):
519 def apply(self, h):
519 if not h.complete():
520 if not h.complete():
520 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
521 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
521 (h.number, h.desc, len(h.a), h.lena, len(h.b),
522 (h.number, h.desc, len(h.a), h.lena, len(h.b),
522 h.lenb))
523 h.lenb))
523
524
524 self.hunks += 1
525 self.hunks += 1
525
526
526 if self.missing:
527 if self.missing:
527 self.rej.append(h)
528 self.rej.append(h)
528 return -1
529 return -1
529
530
530 if self.exists and h.createfile():
531 if self.exists and h.createfile():
531 self.ui.warn(_("file %s already exists\n") % self.fname)
532 self.ui.warn(_("file %s already exists\n") % self.fname)
532 self.rej.append(h)
533 self.rej.append(h)
533 return -1
534 return -1
534
535
535 if isinstance(h, binhunk):
536 if isinstance(h, binhunk):
536 if h.rmfile():
537 if h.rmfile():
537 self.unlink(self.fname)
538 self.unlink(self.fname)
538 else:
539 else:
539 self.lines[:] = h.new()
540 self.lines[:] = h.new()
540 self.offset += len(h.new())
541 self.offset += len(h.new())
541 self.dirty = 1
542 self.dirty = 1
542 return 0
543 return 0
543
544
544 horig = h
545 horig = h
545 if (self.eolmode in ('crlf', 'lf')
546 if (self.eolmode in ('crlf', 'lf')
546 or self.eolmode == 'auto' and self.eol):
547 or self.eolmode == 'auto' and self.eol):
547 # If new eols are going to be normalized, then normalize
548 # If new eols are going to be normalized, then normalize
548 # hunk data before patching. Otherwise, preserve input
549 # hunk data before patching. Otherwise, preserve input
549 # line-endings.
550 # line-endings.
550 h = h.getnormalized()
551 h = h.getnormalized()
551
552
552 # fast case first, no offsets, no fuzz
553 # fast case first, no offsets, no fuzz
553 old = h.old()
554 old = h.old()
554 # patch starts counting at 1 unless we are adding the file
555 # patch starts counting at 1 unless we are adding the file
555 if h.starta == 0:
556 if h.starta == 0:
556 start = 0
557 start = 0
557 else:
558 else:
558 start = h.starta + self.offset - 1
559 start = h.starta + self.offset - 1
559 orig_start = start
560 orig_start = start
560 # if there's skew we want to emit the "(offset %d lines)" even
561 # if there's skew we want to emit the "(offset %d lines)" even
561 # when the hunk cleanly applies at start + skew, so skip the
562 # when the hunk cleanly applies at start + skew, so skip the
562 # fast case code
563 # fast case code
563 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
564 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
564 if h.rmfile():
565 if h.rmfile():
565 self.unlink(self.fname)
566 self.unlink(self.fname)
566 else:
567 else:
567 self.lines[start : start + h.lena] = h.new()
568 self.lines[start : start + h.lena] = h.new()
568 self.offset += h.lenb - h.lena
569 self.offset += h.lenb - h.lena
569 self.dirty = 1
570 self.dirty = 1
570 return 0
571 return 0
571
572
572 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
573 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
573 self.hash = {}
574 self.hash = {}
574 for x, s in enumerate(self.lines):
575 for x, s in enumerate(self.lines):
575 self.hash.setdefault(s, []).append(x)
576 self.hash.setdefault(s, []).append(x)
576 if h.hunk[-1][0] != ' ':
577 if h.hunk[-1][0] != ' ':
577 # if the hunk tried to put something at the bottom of the file
578 # if the hunk tried to put something at the bottom of the file
578 # override the start line and use eof here
579 # override the start line and use eof here
579 search_start = len(self.lines)
580 search_start = len(self.lines)
580 else:
581 else:
581 search_start = orig_start + self.skew
582 search_start = orig_start + self.skew
582
583
583 for fuzzlen in xrange(3):
584 for fuzzlen in xrange(3):
584 for toponly in [True, False]:
585 for toponly in [True, False]:
585 old = h.old(fuzzlen, toponly)
586 old = h.old(fuzzlen, toponly)
586
587
587 cand = self.findlines(old[0][1:], search_start)
588 cand = self.findlines(old[0][1:], search_start)
588 for l in cand:
589 for l in cand:
589 if diffhelpers.testhunk(old, self.lines, l) == 0:
590 if diffhelpers.testhunk(old, self.lines, l) == 0:
590 newlines = h.new(fuzzlen, toponly)
591 newlines = h.new(fuzzlen, toponly)
591 self.lines[l : l + len(old)] = newlines
592 self.lines[l : l + len(old)] = newlines
592 self.offset += len(newlines) - len(old)
593 self.offset += len(newlines) - len(old)
593 self.skew = l - orig_start
594 self.skew = l - orig_start
594 self.dirty = 1
595 self.dirty = 1
595 offset = l - orig_start - fuzzlen
596 offset = l - orig_start - fuzzlen
596 if fuzzlen:
597 if fuzzlen:
597 msg = _("Hunk #%d succeeded at %d "
598 msg = _("Hunk #%d succeeded at %d "
598 "with fuzz %d "
599 "with fuzz %d "
599 "(offset %d lines).\n")
600 "(offset %d lines).\n")
600 self.printfile(True)
601 self.printfile(True)
601 self.ui.warn(msg %
602 self.ui.warn(msg %
602 (h.number, l + 1, fuzzlen, offset))
603 (h.number, l + 1, fuzzlen, offset))
603 else:
604 else:
604 msg = _("Hunk #%d succeeded at %d "
605 msg = _("Hunk #%d succeeded at %d "
605 "(offset %d lines).\n")
606 "(offset %d lines).\n")
606 self.ui.note(msg % (h.number, l + 1, offset))
607 self.ui.note(msg % (h.number, l + 1, offset))
607 return fuzzlen
608 return fuzzlen
608 self.printfile(True)
609 self.printfile(True)
609 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
610 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
610 self.rej.append(horig)
611 self.rej.append(horig)
611 return -1
612 return -1
612
613
613 def close(self):
614 def close(self):
614 if self.dirty:
615 if self.dirty:
615 self.writelines(self.fname, self.lines)
616 self.writelines(self.fname, self.lines)
616 self.write_rej()
617 self.write_rej()
617 return len(self.rej)
618 return len(self.rej)
618
619
class hunk(object):
    """One textual hunk of a patch, in unified or context format.

    A parsed hunk keeps three views of its content: ``a`` (old-side
    lines, '-'/' ' prefixed), ``b`` (new-side lines, unprefixed) and
    ``hunk`` (the raw combined text, starting with the '@@' header).
    """

    def __init__(self, desc, num, lr, context, create=False, remove=False):
        # desc: the hunk header line; num: 1-based hunk number;
        # lr: linereader positioned just past the header (or None to
        # build an empty shell); context: True for context-format diffs.
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)
        self.create = create
        self.remove = remove and not create

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            # rewrite CRLF endings as bare LF, leaving other lines alone
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, rebuilt field by field below
        nh = hunk(self.desc, self.number, None, None, False, False)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        nh.create = self.create
        nh.remove = self.remove
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body from lr into a/b/hunk."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
        # a missing length in '@@ -l,s +l,s @@' means a single line
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk body from lr into a/b/hunk."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.starta, foo2, aend, foo3 = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old-side block: '- ' removals, '! ' changes, '  ' context
        for x in xrange(self.lena):
            line = lr.readline()
            if line.startswith('---'):
                # lines addition, old block is empty
                lr.push(line)
                break
            text = line[2:]
            if line.startswith('- ') or line.startswith('! '):
                entry = '-' + text
            elif line.startswith('  '):
                entry = ' ' + text
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(entry)
            self.hunk.append(entry)

        line = lr.readline()
        if line.startswith('\ '):
            # no-newline-at-eof marker: strip the newline we recorded
            text = self.a[-1][:-1]
            self.a[-1] = text
            self.hunk[-1] = text
            line = lr.readline()
        m = contextdesc.match(line)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.startb, foo2, bend, foo3 = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new-side block: '+ ' additions, '! ' changes, '  ' context
        for x in xrange(self.lenb):
            line = lr.readline()
            if line.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                text = self.b[-1][:-1]
                self.b[-1] = text
                self.hunk[hunki - 1] = text
                continue
            if not line:
                # line deletions, new block is empty and we hit EOF
                lr.push(line)
                break
            text = line[2:]
            if line.startswith('+ ') or line.startswith('! '):
                entry = '+' + text
            elif line.startswith('  '):
                entry = ' ' + text
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(line)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(text)
            # merge the new-side entry into the combined hunk view,
            # skipping over '-' lines already recorded from the old side
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == entry:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, entry)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # rewrite the header in unified form: @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing no-newline-at-eof marker, if present
        line = lr.readline()
        if line.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(line)

    def complete(self):
        """True when both sides hold exactly the advertised line counts."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def createfile(self):
        """True if this hunk creates the target file from scratch."""
        return self.starta == 0 and self.lena == 0 and self.create

    def rmfile(self):
        """True if this hunk removes the target file entirely."""
        return self.startb == 0 and self.lenb == 0 and self.remove

    def fuzzit(self, l, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'.
        # It checks the hunk to make sure only context lines are removed,
        # and then returns a new shortened list of lines.
        fuzz = min(fuzz, len(l) - 1)
        if not fuzz:
            return l
        top = 0
        bot = 0
        hlen = len(self.hunk)
        for x in xrange(hlen - 1):
            # the hunk starts with the @@ line, so use x + 1
            if self.hunk[x + 1][0] == ' ':
                top += 1
            else:
                break
        if not toponly:
            for x in xrange(hlen - 1):
                if self.hunk[hlen - bot - 1][0] == ' ':
                    bot += 1
                else:
                    break

        # top and bot now count context in the hunk;
        # adjust them if either one is short
        context = max(top, bot, 3)
        if bot < context:
            bot = max(0, fuzz - (context - bot))
        else:
            bot = min(fuzz, bot)
        if top < context:
            top = max(0, fuzz - (context - top))
        else:
            top = min(fuzz, top)

        return l[top:len(l) - bot]

    def old(self, fuzz=0, toponly=False):
        """Old-side lines, with up to *fuzz* context lines trimmed."""
        return self.fuzzit(self.a, fuzz, toponly)

    def new(self, fuzz=0, toponly=False):
        """New-side lines, with up to *fuzz* context lines trimmed."""
        return self.fuzzit(self.b, fuzz, toponly)
849
850
class binhunk:
    'A binary patch file. Only understands literals so far.'

    def __init__(self, gitpatch):
        # gitpatch: the gitpatch record describing the operation (op,
        # paths, mode) this binary hunk belongs to
        self.gitpatch = gitpatch
        self.text = None
        self.hunk = ['GIT binary patch\n']

    def createfile(self):
        """True if applying this hunk creates the target file."""
        return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')

    def rmfile(self):
        """True if applying this hunk deletes the target file."""
        return self.gitpatch.op == 'DELETE'

    def complete(self):
        """True once extract() has decoded the payload."""
        return self.text is not None

    def new(self):
        """Return the decoded file content as a single-element list."""
        return [self.text]

    def extract(self, lr):
        """Read and decode a 'literal' git binary hunk from lr.

        Raises PatchError when no literal section is found or the
        decompressed payload does not match the advertised size.
        """
        line = lr.readline()
        self.hunk.append(line)
        while line and not line.startswith('literal '):
            line = lr.readline()
            self.hunk.append(line)
        if not line:
            raise PatchError(_('could not extract binary patch'))
        size = int(line[8:].rstrip())
        dec = []
        line = lr.readline()
        self.hunk.append(line)
        while len(line) > 1:
            # first character encodes the decoded length of the line:
            # 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            dec.append(base85.b85decode(line[1:-1])[:l])
            line = lr.readline()
            self.hunk.append(line)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # BUG FIX: the format arguments must be a tuple inside the
            # parentheses; previously '%' applied to len(text) alone,
            # raising TypeError instead of the intended PatchError.
            raise PatchError(_('binary patch is %d bytes, not %d') %
                             (len(text), size))
        self.text = text
895
896
def parsefilename(str):
    """Extract the filename from a '--- name\\t...' diff header line.

    The first four characters ('--- ' or '+++ ') are dropped; the name
    ends at the first tab, or at the first space when no tab is present,
    or at end of line.  (The parameter shadows the builtin 'str'; kept
    for interface compatibility.)
    """
    body = str[4:].rstrip('\r\n')
    cut = body.find('\t')
    if cut < 0:
        cut = body.find(' ')
        if cut < 0:
            return body
    return body[:cut]
905
906
def pathstrip(path, strip):
    """Split *path* after its first *strip* directory components.

    Returns a (stripped, kept) pair: the leading components (trailing
    slash included, lstripped) and the remainder (rstripped).  Raises
    PatchError when the path has fewer than *strip* components.
    """
    if strip == 0:
        return '', path.rstrip()
    total = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # swallow any run of consecutive '/' separators
        while pos < total - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), path[pos:].rstrip()
923
924
def selectfile(afile_orig, bfile_orig, hunk, strip):
    """Decide which file a hunk should be applied to.

    Strips *strip* leading path components from both diff-header names,
    probes the working directory for the results and returns a
    (filename, missing) pair, where *missing* is True when neither side
    exists and the hunk does not create the file.  Raises PatchError
    when both sides are /dev/null.
    """
    a_is_null = afile_orig == "/dev/null"
    b_is_null = bfile_orig == "/dev/null"
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not a_is_null and os.path.lexists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        # same name on both sides: one existence check suffices
        goodb = gooda
    else:
        goodb = not b_is_null and os.path.lexists(bfile)
    createfunc = hunk.createfile
    missing = not goodb and not gooda and not createfunc()

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if missing and abasedir == bbasedir and afile.startswith(bfile):
        # this isn't very pretty
        hunk.create = True
        if createfunc():
            missing = False
        else:
            hunk.create = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not b_is_null:
            fname = isbackup and afile or bfile
        elif not a_is_null:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    return fname, missing
969
970
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for copy and rename operations.

    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        # seekable stream: remember the position so we can rewind later
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable source (e.g. a pipe): buffer everything in memory
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp, lr.textmode)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches
995
996
def iterhunks(ui, fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    changed = {}
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = False
    git = False

    # our states
    BFILE = 1
    # context is None until we know the diff flavor: True for context
    # diffs, False for unified ones
    context = None
    lr = linereader(fp)

    while True:
        newfile = newgitfile = False
        x = lr.readline()
        if not x:
            break
        if (state == BFILE and ((not context and x[0] == '@') or
            ((context is not False) and x.startswith('***************')))):
            # hunk header for the current target file
            if context is None and x.startswith('***************'):
                context = True
            gpatch = changed.get(bfile)
            create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
            remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
            h = hunk(x, hunknum + 1, lr, context, create, remove)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h)
            yield 'hunk', h
        elif state == BFILE and x.startswith('GIT binary patch'):
            h = binhunk(changed[bfile])
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', ('a/' + afile, 'b/' + bfile, h)
            h.extract(lr)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            # check for git diff, scanning the whole patch file if needed
            m = gitre.match(x)
            if m:
                afile, bfile = m.group(1, 2)
                if not git:
                    git = True
                    gitpatches = scangitpatch(lr, x)
                    yield 'git', gitpatches
                    for gp in gitpatches:
                        changed[gp.path] = gp
                # else error?
                # copy/rename + modify should modify target, not source
                gp = changed.get(bfile)
                if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
                           or gp.mode):
                    afile = bfile
                newgitfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newgitfile or newfile:
            # defer the 'file' event until the first hunk shows up
            emitfile = True
            state = BFILE
            hunknum = 0
1091
1092
def applydiff(ui, fp, changed, strip=1, eolmode='strict'):
    """Read a patch from fp and try to apply it.

    The dict 'changed' is filled in with all of the filenames changed
    by the patch. Returns 0 for a clean patch, -1 if any rejects were
    found and 1 if there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.

    Callers probably want to call 'cmdutil.updatedir' after this to
    apply certain categories of changes not done by this function.
    """
    # delegate to the generic driver, bound to the concrete working-copy
    # patcher and copy helper
    return _applydiff(ui, fp, patchfile, copyfile, changed, strip=strip,
                      eolmode=eolmode)
1108
1109
1109 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
1110 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
1110 rejects = 0
1111 rejects = 0
1111 err = 0
1112 err = 0
1112 current_file = None
1113 current_file = None
1113 cwd = os.getcwd()
1114 cwd = os.getcwd()
1114 opener = scmutil.opener(cwd)
1115 opener = scmutil.opener(cwd)
1115
1116
1116 for state, values in iterhunks(ui, fp):
1117 for state, values in iterhunks(ui, fp):
1117 if state == 'hunk':
1118 if state == 'hunk':
1118 if not current_file:
1119 if not current_file:
1119 continue
1120 continue
1120 ret = current_file.apply(values)
1121 ret = current_file.apply(values)
1121 if ret >= 0:
1122 if ret >= 0:
1122 changed.setdefault(current_file.fname, None)
1123 changed.setdefault(current_file.fname, None)
1123 if ret > 0:
1124 if ret > 0:
1124 err = 1
1125 err = 1
1125 elif state == 'file':
1126 elif state == 'file':
1126 if current_file:
1127 if current_file:
1127 rejects += current_file.close()
1128 rejects += current_file.close()
1128 afile, bfile, first_hunk = values
1129 afile, bfile, first_hunk = values
1129 try:
1130 try:
1130 current_file, missing = selectfile(afile, bfile,
1131 current_file, missing = selectfile(afile, bfile,
1131 first_hunk, strip)
1132 first_hunk, strip)
1132 current_file = patcher(ui, current_file, opener,
1133 current_file = patcher(ui, current_file, opener,
1133 missing=missing, eolmode=eolmode)
1134 missing=missing, eolmode=eolmode)
1134 except PatchError, err:
1135 except PatchError, err:
1135 ui.warn(str(err) + '\n')
1136 ui.warn(str(err) + '\n')
1136 current_file = None
1137 current_file = None
1137 rejects += 1
1138 rejects += 1
1138 continue
1139 continue
1139 elif state == 'git':
1140 elif state == 'git':
1140 for gp in values:
1141 for gp in values:
1141 gp.path = pathstrip(gp.path, strip - 1)[1]
1142 gp.path = pathstrip(gp.path, strip - 1)[1]
1142 if gp.oldpath:
1143 if gp.oldpath:
1143 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1144 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1144 # Binary patches really overwrite target files, copying them
1145 # Binary patches really overwrite target files, copying them
1145 # will just make it fails with "target file exists"
1146 # will just make it fails with "target file exists"
1146 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1147 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1147 copyfn(gp.oldpath, gp.path, cwd)
1148 copyfn(gp.oldpath, gp.path, cwd)
1148 changed[gp.path] = gp
1149 changed[gp.path] = gp
1149 else:
1150 else:
1150 raise util.Abort(_('unsupported parser state: %s') % state)
1151 raise util.Abort(_('unsupported parser state: %s') % state)
1151
1152
1152 if current_file:
1153 if current_file:
1153 rejects += current_file.close()
1154 rejects += current_file.close()
1154
1155
1155 if rejects:
1156 if rejects:
1156 return -1
1157 return -1
1157 return err
1158 return err
1158
1159
def _externalpatch(patcher, patchname, ui, strip, cwd, files):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    if cwd:
        # run the external patch program inside the target directory
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))

    # Scan the external program's output: record every touched file into
    # 'files' and detect fuzzy application or failures.
    # NOTE(review): 'pf' and 'printed_file' are only bound after a
    # "patching file" line has been seen; this assumes the external tool
    # always prints one before any fuzz/FAILED message -- TODO confirm.
    for line in fp:
        line = line.rstrip()
        ui.note(line + '\n')
        if line.startswith('patching file '):
            pf = util.parse_patch_output(line)
            printed_file = False
            files.setdefault(pf, None)
        elif line.find('with fuzz') >= 0:
            fuzz = True
            if not printed_file:
                ui.warn(pf + '\n')
                printed_file = True
            ui.warn(line + '\n')
        elif line.find('saving rejects to file') >= 0:
            ui.warn(line + '\n')
        elif line.find('FAILED') >= 0:
            if not printed_file:
                ui.warn(pf + '\n')
                printed_file = True
            ui.warn(line + '\n')
    code = fp.close()
    if code:
        # popen-style close() returns the exit status when the command failed
        raise PatchError(_("patch command failed: %s") %
                         util.explain_exit(code)[0])
    return fuzz
1195
1196
def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""

    if files is None:
        files = {}
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        # report the user-supplied spelling before normalizing it
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    try:
        src = open(patchobj, 'rb')
    except TypeError:
        # patchobj is already a file-like object
        src = patchobj
    prevdir = None
    if cwd:
        prevdir = os.getcwd()
        os.chdir(cwd)
    try:
        ret = applydiff(ui, src, files, strip=strip, eolmode=eolmode)
    finally:
        # always restore the working directory and close any file we opened
        if prevdir is not None:
            os.chdir(prevdir)
        if src != patchobj:
            src.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1225
1226
def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    # a configured ui.patch program takes precedence over the builtin patcher
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = {}
    try:
        if patcher:
            return _externalpatch(patcher, patchname, ui, strip, cwd, files)
        return internalpatch(patchname, ui, strip, cwd, files, eolmode)
    except PatchError, err:
        # surface any patching failure to the user as a hard abort
        raise util.Abort(str(err))
1247
1248
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def gitindex(text):
        # git blob id: sha1 over "blob <len>\0<data>"; empty -> null id
        if not text:
            return hex(nullid)
        s = util.sha1('blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    def fmtline(line):
        # git encodes the chunk length as one leading letter:
        # 'A'-'Z' for 1-26 bytes, 'a'-'z' for 27-52 bytes
        n = len(line)
        if n <= 26:
            prefix = chr(ord('A') + n - 1)
        else:
            prefix = chr(n - 26 + ord('a') - 1)
        return '%c%s\n' % (prefix, base85.b85encode(line, True))

    def chunk(text, csize=52):
        # slice text into csize-byte pieces
        for start in range(0, len(text), csize):
            yield text[start:start + csize]

    tohash = gitindex(to)
    tnhash = gitindex(tn)
    if tohash == tnhash:
        # contents are identical: nothing to emit
        return ""

    # TODO: deltas
    pieces = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
              (tohash, tnhash, len(tn))]
    pieces.extend(fmtline(c) for c in chunk(zlib.compress(tn)))
    pieces.append('\n')
    return ''.join(pieces)
1285
1286
class GitDiffRequired(Exception):
    """Raised when a change can only be represented in git diff format."""
    pass
1288
1289
def diffopts(ui, opts=None, untrusted=False):
    """Build mdiff.diffopts from command options and [diff] config."""
    def fetch(key, name=None, getter=ui.configbool):
        # a truthy command-line option wins; otherwise consult [diff] config
        override = opts and opts.get(key)
        if override:
            return override
        return getter('diff', name or key, None, untrusted=untrusted)
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=fetch('git'),
        nodates=fetch('nodates'),
        showfunc=fetch('show_function', 'showfunc'),
        ignorews=fetch('ignore_all_space', 'ignorews'),
        ignorewsamount=fetch('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=fetch('ignore_blank_lines', 'ignoreblanklines'),
        context=fetch('unified', getter=ui.config))
1302
1303
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        # default to comparing the first dirstate parent against the
        # working directory
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU (20 entries) of filelogs keyed by filename, so that
        # repeated lookups of the same file reuse the revlog object
        cache = {}
        order = []
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.pop(0)]
                cache[f] = fctx.filelog()
            else:
                # move f to the most-recently-used position
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        # nothing to diff
        return []

    revs = None
    if not repo.ui.quiet:
        # short hashes normally, full hashes in debug mode
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        # copy/rename information is only representable in git format
        copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]

    difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
                modified, added, removed, copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # give the caller a chance to accept the data loss;
                # otherwise retry in git format
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired()
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1377
1378
1378 def difflabel(func, *args, **kw):
1379 def difflabel(func, *args, **kw):
1379 '''yields 2-tuples of (output, label) based on the output of func()'''
1380 '''yields 2-tuples of (output, label) based on the output of func()'''
1380 prefixes = [('diff', 'diff.diffline'),
1381 prefixes = [('diff', 'diff.diffline'),
1381 ('copy', 'diff.extended'),
1382 ('copy', 'diff.extended'),
1382 ('rename', 'diff.extended'),
1383 ('rename', 'diff.extended'),
1383 ('old', 'diff.extended'),
1384 ('old', 'diff.extended'),
1384 ('new', 'diff.extended'),
1385 ('new', 'diff.extended'),
1385 ('deleted', 'diff.extended'),
1386 ('deleted', 'diff.extended'),
1386 ('---', 'diff.file_a'),
1387 ('---', 'diff.file_a'),
1387 ('+++', 'diff.file_b'),
1388 ('+++', 'diff.file_b'),
1388 ('@@', 'diff.hunk'),
1389 ('@@', 'diff.hunk'),
1389 ('-', 'diff.deleted'),
1390 ('-', 'diff.deleted'),
1390 ('+', 'diff.inserted')]
1391 ('+', 'diff.inserted')]
1391
1392
1392 for chunk in func(*args, **kw):
1393 for chunk in func(*args, **kw):
1393 lines = chunk.split('\n')
1394 lines = chunk.split('\n')
1394 for i, line in enumerate(lines):
1395 for i, line in enumerate(lines):
1395 if i != 0:
1396 if i != 0:
1396 yield ('\n', '')
1397 yield ('\n', '')
1397 stripline = line
1398 stripline = line
1398 if line and line[0] in '+-':
1399 if line and line[0] in '+-':
1399 # highlight trailing whitespace, but only in changed lines
1400 # highlight trailing whitespace, but only in changed lines
1400 stripline = line.rstrip()
1401 stripline = line.rstrip()
1401 for prefix, label in prefixes:
1402 for prefix, label in prefixes:
1402 if stripline.startswith(prefix):
1403 if stripline.startswith(prefix):
1403 yield (stripline, label)
1404 yield (stripline, label)
1404 break
1405 break
1405 else:
1406 else:
1406 yield (line, '')
1407 yield (line, '')
1407 if line != stripline:
1408 if line != stripline:
1408 yield (line[len(stripline):], 'diff.trailingwhitespace')
1409 yield (line[len(stripline):], 'diff.trailingwhitespace')
1409
1410
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # thin wrapper: run diff() through difflabel's prefix classifier
    return difflabel(diff, *args, **kw)
1413
1414
1414
1415
1415 def _addmodehdr(header, omode, nmode):
1416 def _addmodehdr(header, omode, nmode):
1416 if omode != nmode:
1417 if omode != nmode:
1417 header.append('old mode %s\n' % omode)
1418 header.append('old mode %s\n' % omode)
1418 header.append('new mode %s\n' % nmode)
1419 header.append('new mode %s\n' % nmode)
1419
1420
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    """Generate diff text chunks for the given file changes.

    Yields header strings and unified/binary diff text per file.  When
    'losedatafn' is set it is invoked for every change that cannot be
    represented without the git extended diff format.
    """

    def join(f):
        # prepend the display prefix (used for subrepos)
        return os.path.join(prefix, f)

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    # source files already consumed by a rename, so later removals of the
    # same source become plain deletions instead of more renames
    gone = set()
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # reverse mapping of the copy dict: destination -> source
    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        # git-style diff lines carry no revision info
        revs = None

    for f in sorted(modified + added + removed):
        to = None
        tn = None
        dodiff = True
        header = []
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f
        if opts.git or losedatafn:
            if f in added:
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        _addmodehdr(header, omode, mode)
                        # a removed source that hasn't been claimed yet
                        # makes this a rename, otherwise a copy
                        if a in removed and a not in gone:
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        # mode bits are not representable in plain diffs
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed:
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                # modified file: compare flags and binary-ness
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    losedatafn(f)
        if opts.git:
            # the "diff --git a/... b/..." line always comes first
            header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))

        if dodiff:
            if dodiff == 'binary':
                text = b85diff(to, tn)
            else:
                text = mdiff.unidiff(to, date1,
                                     # ctx2 date may be dynamic
                                     tn, util.datestr(ctx2.date()),
                                     join(a), join(b), revs, opts=opts)
            # a lone git diffline with no body is noise; emit the header
            # only when there is text or extended header information
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text
1523
1524
def diffstatdata(lines):
    """Parse diff output lines into per-file change statistics.

    Yields one (filename, adds, removes, isbinary) tuple per file in the
    diff.  'isbinary' is True when a file header was seen but no +/-
    lines followed (the heuristic used for binary patches).
    """
    # raw string: the original non-raw literal relied on Python passing the
    # unknown escape '\s' through unchanged, which is deprecated
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    filename, adds, removes = None, 0, 0
    for line in lines:
        if line.startswith('diff'):
            if filename:
                # flush the previous file's counters
                isbinary = adds == 0 and removes == 0
                yield (filename, adds, removes, isbinary)
            # set numbers to 0 anyway when starting new file
            adds, removes = 0, 0
            if line.startswith('diff --git'):
                filename = gitre.search(line).group(1)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++'):
            adds += 1
        elif line.startswith('-') and not line.startswith('---'):
            removes += 1
    if filename:
        # flush the last file
        isbinary = adds == 0 and removes == 0
        yield (filename, adds, removes, isbinary)
1547
1548
def diffstat(lines, width=80, git=False):
    """Render diff output lines as a diffstat-style histogram.

    Returns a string of ' filename | count ++--' rows plus a summary
    line, fitted into 'width' columns.  With git=True, binary files are
    shown as 'Bin' instead of a change count.
    """
    output = []
    stats = list(diffstatdata(lines))

    maxtotal, maxname = 0, 0
    totaladds, totalremoves = 0, 0
    hasbinary = False

    # precompute display widths; colwidth accounts for wide characters
    sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
             for filename, adds, removes, isbinary in stats]

    for filename, adds, removes, isbinary, namewidth in sized:
        totaladds += adds
        totalremoves += removes
        maxname = max(maxname, namewidth)
        maxtotal = max(maxtotal, adds + removes)
        if isbinary:
            hasbinary = True

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # make room for the 'Bin' marker
        countwidth = 3
    # remaining columns for the +/- graph: the 6 covers separators/padding
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary, namewidth in sized:
        if git and isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - namewidth),
                       countwidth, count,
                       pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
1599
1600
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for row in diffstat(*args, **kw).splitlines():
        if row and row[-1] in '+-':
            # histogram row: split the +/- graph off the name/count part
            text, bar = row.rsplit(' ', 1)
            yield (text + ' ', '')
            inserted = re.search(r'\++', bar)
            if inserted:
                yield (inserted.group(0), 'diffstat.inserted')
            deleted = re.search(r'-+', bar)
            if deleted:
                yield (deleted.group(0), 'diffstat.deleted')
        else:
            yield (row, '')
        yield ('\n', '')
@@ -1,122 +1,170 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import util, error
9 import util, error
10 import os, errno
10 import os, errno
11
11
12 def checkportable(ui, f):
12 def checkportable(ui, f):
13 '''Check if filename f is portable and warn or abort depending on config'''
13 '''Check if filename f is portable and warn or abort depending on config'''
14 util.checkfilename(f)
14 util.checkfilename(f)
15 val = ui.config('ui', 'portablefilenames', 'warn')
15 val = ui.config('ui', 'portablefilenames', 'warn')
16 lval = val.lower()
16 lval = val.lower()
17 abort = os.name == 'nt' or lval == 'abort'
17 abort = os.name == 'nt' or lval == 'abort'
18 bval = util.parsebool(val)
18 bval = util.parsebool(val)
19 if abort or lval == 'warn' or bval:
19 if abort or lval == 'warn' or bval:
20 msg = util.checkwinfilename(f)
20 msg = util.checkwinfilename(f)
21 if msg:
21 if msg:
22 if abort:
22 if abort:
23 raise util.Abort("%s: %r" % (msg, f))
23 raise util.Abort("%s: %r" % (msg, f))
24 ui.warn(_("warning: %s: %r\n") % (msg, f))
24 ui.warn(_("warning: %s: %r\n") % (msg, f))
25 elif bval is None and lval != 'ignore':
25 elif bval is None and lval != 'ignore':
26 raise error.ConfigError(
26 raise error.ConfigError(
27 _("ui.portablefilenames value is invalid ('%s')") % val)
27 _("ui.portablefilenames value is invalid ('%s')") % val)
28
28
29 class opener(object):
29 class opener(object):
30 '''Open files relative to a base directory
30 '''Open files relative to a base directory
31
31
32 This class is used to hide the details of COW semantics and
32 This class is used to hide the details of COW semantics and
33 remote file access from higher level code.
33 remote file access from higher level code.
34 '''
34 '''
35 def __init__(self, base, audit=True):
35 def __init__(self, base, audit=True):
36 self.base = base
36 self.base = base
37 if audit:
37 if audit:
38 self.auditor = util.path_auditor(base)
38 self.auditor = util.path_auditor(base)
39 else:
39 else:
40 self.auditor = util.always
40 self.auditor = util.always
41 self.createmode = None
41 self.createmode = None
42 self._trustnlink = None
42 self._trustnlink = None
43
43
44 @util.propertycache
44 @util.propertycache
45 def _can_symlink(self):
45 def _can_symlink(self):
46 return util.checklink(self.base)
46 return util.checklink(self.base)
47
47
48 def _fixfilemode(self, name):
48 def _fixfilemode(self, name):
49 if self.createmode is None:
49 if self.createmode is None:
50 return
50 return
51 os.chmod(name, self.createmode & 0666)
51 os.chmod(name, self.createmode & 0666)
52
52
53 def __call__(self, path, mode="r", text=False, atomictemp=False):
53 def __call__(self, path, mode="r", text=False, atomictemp=False):
54 r = util.checkosfilename(path)
54 r = util.checkosfilename(path)
55 if r:
55 if r:
56 raise Abort("%s: %r" % (r, path))
56 raise Abort("%s: %r" % (r, path))
57 self.auditor(path)
57 self.auditor(path)
58 f = os.path.join(self.base, path)
58 f = os.path.join(self.base, path)
59
59
60 if not text and "b" not in mode:
60 if not text and "b" not in mode:
61 mode += "b" # for that other OS
61 mode += "b" # for that other OS
62
62
63 nlink = -1
63 nlink = -1
64 dirname, basename = os.path.split(f)
64 dirname, basename = os.path.split(f)
65 # If basename is empty, then the path is malformed because it points
65 # If basename is empty, then the path is malformed because it points
66 # to a directory. Let the posixfile() call below raise IOError.
66 # to a directory. Let the posixfile() call below raise IOError.
67 if basename and mode not in ('r', 'rb'):
67 if basename and mode not in ('r', 'rb'):
68 if atomictemp:
68 if atomictemp:
69 if not os.path.isdir(dirname):
69 if not os.path.isdir(dirname):
70 util.makedirs(dirname, self.createmode)
70 util.makedirs(dirname, self.createmode)
71 return util.atomictempfile(f, mode, self.createmode)
71 return util.atomictempfile(f, mode, self.createmode)
72 try:
72 try:
73 if 'w' in mode:
73 if 'w' in mode:
74 util.unlink(f)
74 util.unlink(f)
75 nlink = 0
75 nlink = 0
76 else:
76 else:
77 # nlinks() may behave differently for files on Windows
77 # nlinks() may behave differently for files on Windows
78 # shares if the file is open.
78 # shares if the file is open.
79 fd = util.posixfile(f)
79 fd = util.posixfile(f)
80 nlink = util.nlinks(f)
80 nlink = util.nlinks(f)
81 if nlink < 1:
81 if nlink < 1:
82 nlink = 2 # force mktempcopy (issue1922)
82 nlink = 2 # force mktempcopy (issue1922)
83 fd.close()
83 fd.close()
84 except (OSError, IOError), e:
84 except (OSError, IOError), e:
85 if e.errno != errno.ENOENT:
85 if e.errno != errno.ENOENT:
86 raise
86 raise
87 nlink = 0
87 nlink = 0
88 if not os.path.isdir(dirname):
88 if not os.path.isdir(dirname):
89 util.makedirs(dirname, self.createmode)
89 util.makedirs(dirname, self.createmode)
90 if nlink > 0:
90 if nlink > 0:
91 if self._trustnlink is None:
91 if self._trustnlink is None:
92 self._trustnlink = nlink > 1 or util.checknlink(f)
92 self._trustnlink = nlink > 1 or util.checknlink(f)
93 if nlink > 1 or not self._trustnlink:
93 if nlink > 1 or not self._trustnlink:
94 util.rename(util.mktempcopy(f), f)
94 util.rename(util.mktempcopy(f), f)
95 fp = util.posixfile(f, mode)
95 fp = util.posixfile(f, mode)
96 if nlink == 0:
96 if nlink == 0:
97 self._fixfilemode(f)
97 self._fixfilemode(f)
98 return fp
98 return fp
99
99
100 def symlink(self, src, dst):
100 def symlink(self, src, dst):
101 self.auditor(dst)
101 self.auditor(dst)
102 linkname = os.path.join(self.base, dst)
102 linkname = os.path.join(self.base, dst)
103 try:
103 try:
104 os.unlink(linkname)
104 os.unlink(linkname)
105 except OSError:
105 except OSError:
106 pass
106 pass
107
107
108 dirname = os.path.dirname(linkname)
108 dirname = os.path.dirname(linkname)
109 if not os.path.exists(dirname):
109 if not os.path.exists(dirname):
110 util.makedirs(dirname, self.createmode)
110 util.makedirs(dirname, self.createmode)
111
111
112 if self._can_symlink:
112 if self._can_symlink:
113 try:
113 try:
114 os.symlink(src, linkname)
114 os.symlink(src, linkname)
115 except OSError, err:
115 except OSError, err:
116 raise OSError(err.errno, _('could not symlink to %r: %s') %
116 raise OSError(err.errno, _('could not symlink to %r: %s') %
117 (src, err.strerror), linkname)
117 (src, err.strerror), linkname)
118 else:
118 else:
119 f = self(dst, "w")
119 f = self(dst, "w")
120 f.write(src)
120 f.write(src)
121 f.close()
121 f.close()
122 self._fixfilemode(dst)
122 self._fixfilemode(dst)
123
124 def canonpath(root, cwd, myname, auditor=None):
125 '''return the canonical path of myname, given cwd and root'''
126 if util.endswithsep(root):
127 rootsep = root
128 else:
129 rootsep = root + os.sep
130 name = myname
131 if not os.path.isabs(name):
132 name = os.path.join(root, cwd, name)
133 name = os.path.normpath(name)
134 if auditor is None:
135 auditor = util.path_auditor(root)
136 if name != rootsep and name.startswith(rootsep):
137 name = name[len(rootsep):]
138 auditor(name)
139 return util.pconvert(name)
140 elif name == root:
141 return ''
142 else:
143 # Determine whether `name' is in the hierarchy at or beneath `root',
144 # by iterating name=dirname(name) until that causes no change (can't
145 # check name == '/', because that doesn't work on windows). For each
146 # `name', compare dev/inode numbers. If they match, the list `rel'
147 # holds the reversed list of components making up the relative file
148 # name we want.
149 root_st = os.stat(root)
150 rel = []
151 while True:
152 try:
153 name_st = os.stat(name)
154 except OSError:
155 break
156 if util.samestat(name_st, root_st):
157 if not rel:
158 # name was actually the same as root (maybe a symlink)
159 return ''
160 rel.reverse()
161 name = os.path.join(*rel)
162 auditor(name)
163 return util.pconvert(name)
164 dirname, basename = os.path.split(name)
165 rel.append(basename)
166 if dirname == name:
167 break
168 name = dirname
169
170 raise util.Abort('%s not under root' % myname)
@@ -1,1490 +1,1442 b''
1 # util.py - Mercurial utility functions and platform specfic implementations
1 # util.py - Mercurial utility functions and platform specfic implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specfic implementations.
10 """Mercurial utility functions and platform specfic implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding
17 import error, osutil, encoding
18 import errno, re, shutil, sys, tempfile, traceback
18 import errno, re, shutil, sys, tempfile, traceback
19 import os, stat, time, calendar, textwrap, unicodedata, signal
19 import os, stat, time, calendar, textwrap, unicodedata, signal
20 import imp, socket
20 import imp, socket
21
21
22 # Python compatibility
22 # Python compatibility
23
23
24 def sha1(s):
24 def sha1(s):
25 return _fastsha1(s)
25 return _fastsha1(s)
26
26
27 def _fastsha1(s):
27 def _fastsha1(s):
28 # This function will import sha1 from hashlib or sha (whichever is
28 # This function will import sha1 from hashlib or sha (whichever is
29 # available) and overwrite itself with it on the first call.
29 # available) and overwrite itself with it on the first call.
30 # Subsequent calls will go directly to the imported function.
30 # Subsequent calls will go directly to the imported function.
31 if sys.version_info >= (2, 5):
31 if sys.version_info >= (2, 5):
32 from hashlib import sha1 as _sha1
32 from hashlib import sha1 as _sha1
33 else:
33 else:
34 from sha import sha as _sha1
34 from sha import sha as _sha1
35 global _fastsha1, sha1
35 global _fastsha1, sha1
36 _fastsha1 = sha1 = _sha1
36 _fastsha1 = sha1 = _sha1
37 return _sha1(s)
37 return _sha1(s)
38
38
39 import __builtin__
39 import __builtin__
40
40
41 if sys.version_info[0] < 3:
41 if sys.version_info[0] < 3:
42 def fakebuffer(sliceable, offset=0):
42 def fakebuffer(sliceable, offset=0):
43 return sliceable[offset:]
43 return sliceable[offset:]
44 else:
44 else:
45 def fakebuffer(sliceable, offset=0):
45 def fakebuffer(sliceable, offset=0):
46 return memoryview(sliceable)[offset:]
46 return memoryview(sliceable)[offset:]
47 try:
47 try:
48 buffer
48 buffer
49 except NameError:
49 except NameError:
50 __builtin__.buffer = fakebuffer
50 __builtin__.buffer = fakebuffer
51
51
52 import subprocess
52 import subprocess
53 closefds = os.name == 'posix'
53 closefds = os.name == 'posix'
54
54
55 def popen2(cmd, env=None, newlines=False):
55 def popen2(cmd, env=None, newlines=False):
56 # Setting bufsize to -1 lets the system decide the buffer size.
56 # Setting bufsize to -1 lets the system decide the buffer size.
57 # The default for bufsize is 0, meaning unbuffered. This leads to
57 # The default for bufsize is 0, meaning unbuffered. This leads to
58 # poor performance on Mac OS X: http://bugs.python.org/issue4194
58 # poor performance on Mac OS X: http://bugs.python.org/issue4194
59 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
59 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
60 close_fds=closefds,
60 close_fds=closefds,
61 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
61 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
62 universal_newlines=newlines,
62 universal_newlines=newlines,
63 env=env)
63 env=env)
64 return p.stdin, p.stdout
64 return p.stdin, p.stdout
65
65
66 def popen3(cmd, env=None, newlines=False):
66 def popen3(cmd, env=None, newlines=False):
67 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
67 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
68 close_fds=closefds,
68 close_fds=closefds,
69 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
69 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
70 stderr=subprocess.PIPE,
70 stderr=subprocess.PIPE,
71 universal_newlines=newlines,
71 universal_newlines=newlines,
72 env=env)
72 env=env)
73 return p.stdin, p.stdout, p.stderr
73 return p.stdin, p.stdout, p.stderr
74
74
75 def version():
75 def version():
76 """Return version information if available."""
76 """Return version information if available."""
77 try:
77 try:
78 import __version__
78 import __version__
79 return __version__.version
79 return __version__.version
80 except ImportError:
80 except ImportError:
81 return 'unknown'
81 return 'unknown'
82
82
83 # used by parsedate
83 # used by parsedate
84 defaultdateformats = (
84 defaultdateformats = (
85 '%Y-%m-%d %H:%M:%S',
85 '%Y-%m-%d %H:%M:%S',
86 '%Y-%m-%d %I:%M:%S%p',
86 '%Y-%m-%d %I:%M:%S%p',
87 '%Y-%m-%d %H:%M',
87 '%Y-%m-%d %H:%M',
88 '%Y-%m-%d %I:%M%p',
88 '%Y-%m-%d %I:%M%p',
89 '%Y-%m-%d',
89 '%Y-%m-%d',
90 '%m-%d',
90 '%m-%d',
91 '%m/%d',
91 '%m/%d',
92 '%m/%d/%y',
92 '%m/%d/%y',
93 '%m/%d/%Y',
93 '%m/%d/%Y',
94 '%a %b %d %H:%M:%S %Y',
94 '%a %b %d %H:%M:%S %Y',
95 '%a %b %d %I:%M:%S%p %Y',
95 '%a %b %d %I:%M:%S%p %Y',
96 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
96 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
97 '%b %d %H:%M:%S %Y',
97 '%b %d %H:%M:%S %Y',
98 '%b %d %I:%M:%S%p %Y',
98 '%b %d %I:%M:%S%p %Y',
99 '%b %d %H:%M:%S',
99 '%b %d %H:%M:%S',
100 '%b %d %I:%M:%S%p',
100 '%b %d %I:%M:%S%p',
101 '%b %d %H:%M',
101 '%b %d %H:%M',
102 '%b %d %I:%M%p',
102 '%b %d %I:%M%p',
103 '%b %d %Y',
103 '%b %d %Y',
104 '%b %d',
104 '%b %d',
105 '%H:%M:%S',
105 '%H:%M:%S',
106 '%I:%M:%S%p',
106 '%I:%M:%S%p',
107 '%H:%M',
107 '%H:%M',
108 '%I:%M%p',
108 '%I:%M%p',
109 )
109 )
110
110
111 extendeddateformats = defaultdateformats + (
111 extendeddateformats = defaultdateformats + (
112 "%Y",
112 "%Y",
113 "%Y-%m",
113 "%Y-%m",
114 "%b",
114 "%b",
115 "%b %Y",
115 "%b %Y",
116 )
116 )
117
117
118 def cachefunc(func):
118 def cachefunc(func):
119 '''cache the result of function calls'''
119 '''cache the result of function calls'''
120 # XXX doesn't handle keywords args
120 # XXX doesn't handle keywords args
121 cache = {}
121 cache = {}
122 if func.func_code.co_argcount == 1:
122 if func.func_code.co_argcount == 1:
123 # we gain a small amount of time because
123 # we gain a small amount of time because
124 # we don't need to pack/unpack the list
124 # we don't need to pack/unpack the list
125 def f(arg):
125 def f(arg):
126 if arg not in cache:
126 if arg not in cache:
127 cache[arg] = func(arg)
127 cache[arg] = func(arg)
128 return cache[arg]
128 return cache[arg]
129 else:
129 else:
130 def f(*args):
130 def f(*args):
131 if args not in cache:
131 if args not in cache:
132 cache[args] = func(*args)
132 cache[args] = func(*args)
133 return cache[args]
133 return cache[args]
134
134
135 return f
135 return f
136
136
137 def lrucachefunc(func):
137 def lrucachefunc(func):
138 '''cache most recent results of function calls'''
138 '''cache most recent results of function calls'''
139 cache = {}
139 cache = {}
140 order = []
140 order = []
141 if func.func_code.co_argcount == 1:
141 if func.func_code.co_argcount == 1:
142 def f(arg):
142 def f(arg):
143 if arg not in cache:
143 if arg not in cache:
144 if len(cache) > 20:
144 if len(cache) > 20:
145 del cache[order.pop(0)]
145 del cache[order.pop(0)]
146 cache[arg] = func(arg)
146 cache[arg] = func(arg)
147 else:
147 else:
148 order.remove(arg)
148 order.remove(arg)
149 order.append(arg)
149 order.append(arg)
150 return cache[arg]
150 return cache[arg]
151 else:
151 else:
152 def f(*args):
152 def f(*args):
153 if args not in cache:
153 if args not in cache:
154 if len(cache) > 20:
154 if len(cache) > 20:
155 del cache[order.pop(0)]
155 del cache[order.pop(0)]
156 cache[args] = func(*args)
156 cache[args] = func(*args)
157 else:
157 else:
158 order.remove(args)
158 order.remove(args)
159 order.append(args)
159 order.append(args)
160 return cache[args]
160 return cache[args]
161
161
162 return f
162 return f
163
163
164 class propertycache(object):
164 class propertycache(object):
165 def __init__(self, func):
165 def __init__(self, func):
166 self.func = func
166 self.func = func
167 self.name = func.__name__
167 self.name = func.__name__
168 def __get__(self, obj, type=None):
168 def __get__(self, obj, type=None):
169 result = self.func(obj)
169 result = self.func(obj)
170 setattr(obj, self.name, result)
170 setattr(obj, self.name, result)
171 return result
171 return result
172
172
173 def pipefilter(s, cmd):
173 def pipefilter(s, cmd):
174 '''filter string S through command CMD, returning its output'''
174 '''filter string S through command CMD, returning its output'''
175 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
175 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
176 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
176 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
177 pout, perr = p.communicate(s)
177 pout, perr = p.communicate(s)
178 return pout
178 return pout
179
179
180 def tempfilter(s, cmd):
180 def tempfilter(s, cmd):
181 '''filter string S through a pair of temporary files with CMD.
181 '''filter string S through a pair of temporary files with CMD.
182 CMD is used as a template to create the real command to be run,
182 CMD is used as a template to create the real command to be run,
183 with the strings INFILE and OUTFILE replaced by the real names of
183 with the strings INFILE and OUTFILE replaced by the real names of
184 the temporary files generated.'''
184 the temporary files generated.'''
185 inname, outname = None, None
185 inname, outname = None, None
186 try:
186 try:
187 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
187 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
188 fp = os.fdopen(infd, 'wb')
188 fp = os.fdopen(infd, 'wb')
189 fp.write(s)
189 fp.write(s)
190 fp.close()
190 fp.close()
191 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
191 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
192 os.close(outfd)
192 os.close(outfd)
193 cmd = cmd.replace('INFILE', inname)
193 cmd = cmd.replace('INFILE', inname)
194 cmd = cmd.replace('OUTFILE', outname)
194 cmd = cmd.replace('OUTFILE', outname)
195 code = os.system(cmd)
195 code = os.system(cmd)
196 if sys.platform == 'OpenVMS' and code & 1:
196 if sys.platform == 'OpenVMS' and code & 1:
197 code = 0
197 code = 0
198 if code:
198 if code:
199 raise Abort(_("command '%s' failed: %s") %
199 raise Abort(_("command '%s' failed: %s") %
200 (cmd, explain_exit(code)))
200 (cmd, explain_exit(code)))
201 fp = open(outname, 'rb')
201 fp = open(outname, 'rb')
202 r = fp.read()
202 r = fp.read()
203 fp.close()
203 fp.close()
204 return r
204 return r
205 finally:
205 finally:
206 try:
206 try:
207 if inname:
207 if inname:
208 os.unlink(inname)
208 os.unlink(inname)
209 except:
209 except:
210 pass
210 pass
211 try:
211 try:
212 if outname:
212 if outname:
213 os.unlink(outname)
213 os.unlink(outname)
214 except:
214 except:
215 pass
215 pass
216
216
217 filtertable = {
217 filtertable = {
218 'tempfile:': tempfilter,
218 'tempfile:': tempfilter,
219 'pipe:': pipefilter,
219 'pipe:': pipefilter,
220 }
220 }
221
221
222 def filter(s, cmd):
222 def filter(s, cmd):
223 "filter a string through a command that transforms its input to its output"
223 "filter a string through a command that transforms its input to its output"
224 for name, fn in filtertable.iteritems():
224 for name, fn in filtertable.iteritems():
225 if cmd.startswith(name):
225 if cmd.startswith(name):
226 return fn(s, cmd[len(name):].lstrip())
226 return fn(s, cmd[len(name):].lstrip())
227 return pipefilter(s, cmd)
227 return pipefilter(s, cmd)
228
228
229 def binary(s):
229 def binary(s):
230 """return true if a string is binary data"""
230 """return true if a string is binary data"""
231 return bool(s and '\0' in s)
231 return bool(s and '\0' in s)
232
232
233 def increasingchunks(source, min=1024, max=65536):
233 def increasingchunks(source, min=1024, max=65536):
234 '''return no less than min bytes per chunk while data remains,
234 '''return no less than min bytes per chunk while data remains,
235 doubling min after each chunk until it reaches max'''
235 doubling min after each chunk until it reaches max'''
236 def log2(x):
236 def log2(x):
237 if not x:
237 if not x:
238 return 0
238 return 0
239 i = 0
239 i = 0
240 while x:
240 while x:
241 x >>= 1
241 x >>= 1
242 i += 1
242 i += 1
243 return i - 1
243 return i - 1
244
244
245 buf = []
245 buf = []
246 blen = 0
246 blen = 0
247 for chunk in source:
247 for chunk in source:
248 buf.append(chunk)
248 buf.append(chunk)
249 blen += len(chunk)
249 blen += len(chunk)
250 if blen >= min:
250 if blen >= min:
251 if min < max:
251 if min < max:
252 min = min << 1
252 min = min << 1
253 nmin = 1 << log2(blen)
253 nmin = 1 << log2(blen)
254 if nmin > min:
254 if nmin > min:
255 min = nmin
255 min = nmin
256 if min > max:
256 if min > max:
257 min = max
257 min = max
258 yield ''.join(buf)
258 yield ''.join(buf)
259 blen = 0
259 blen = 0
260 buf = []
260 buf = []
261 if buf:
261 if buf:
262 yield ''.join(buf)
262 yield ''.join(buf)
263
263
264 Abort = error.Abort
264 Abort = error.Abort
265
265
266 def always(fn):
266 def always(fn):
267 return True
267 return True
268
268
269 def never(fn):
269 def never(fn):
270 return False
270 return False
271
271
272 def pathto(root, n1, n2):
272 def pathto(root, n1, n2):
273 '''return the relative path from one place to another.
273 '''return the relative path from one place to another.
274 root should use os.sep to separate directories
274 root should use os.sep to separate directories
275 n1 should use os.sep to separate directories
275 n1 should use os.sep to separate directories
276 n2 should use "/" to separate directories
276 n2 should use "/" to separate directories
277 returns an os.sep-separated path.
277 returns an os.sep-separated path.
278
278
279 If n1 is a relative path, it's assumed it's
279 If n1 is a relative path, it's assumed it's
280 relative to root.
280 relative to root.
281 n2 should always be relative to root.
281 n2 should always be relative to root.
282 '''
282 '''
283 if not n1:
283 if not n1:
284 return localpath(n2)
284 return localpath(n2)
285 if os.path.isabs(n1):
285 if os.path.isabs(n1):
286 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
286 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
287 return os.path.join(root, localpath(n2))
287 return os.path.join(root, localpath(n2))
288 n2 = '/'.join((pconvert(root), n2))
288 n2 = '/'.join((pconvert(root), n2))
289 a, b = splitpath(n1), n2.split('/')
289 a, b = splitpath(n1), n2.split('/')
290 a.reverse()
290 a.reverse()
291 b.reverse()
291 b.reverse()
292 while a and b and a[-1] == b[-1]:
292 while a and b and a[-1] == b[-1]:
293 a.pop()
293 a.pop()
294 b.pop()
294 b.pop()
295 b.reverse()
295 b.reverse()
296 return os.sep.join((['..'] * len(a)) + b) or '.'
296 return os.sep.join((['..'] * len(a)) + b) or '.'
297
297
298 def canonpath(root, cwd, myname, auditor=None):
299 """return the canonical path of myname, given cwd and root"""
300 if endswithsep(root):
301 rootsep = root
302 else:
303 rootsep = root + os.sep
304 name = myname
305 if not os.path.isabs(name):
306 name = os.path.join(root, cwd, name)
307 name = os.path.normpath(name)
308 if auditor is None:
309 auditor = path_auditor(root)
310 if name != rootsep and name.startswith(rootsep):
311 name = name[len(rootsep):]
312 auditor(name)
313 return pconvert(name)
314 elif name == root:
315 return ''
316 else:
317 # Determine whether `name' is in the hierarchy at or beneath `root',
318 # by iterating name=dirname(name) until that causes no change (can't
319 # check name == '/', because that doesn't work on windows). For each
320 # `name', compare dev/inode numbers. If they match, the list `rel'
321 # holds the reversed list of components making up the relative file
322 # name we want.
323 root_st = os.stat(root)
324 rel = []
325 while True:
326 try:
327 name_st = os.stat(name)
328 except OSError:
329 break
330 if samestat(name_st, root_st):
331 if not rel:
332 # name was actually the same as root (maybe a symlink)
333 return ''
334 rel.reverse()
335 name = os.path.join(*rel)
336 auditor(name)
337 return pconvert(name)
338 dirname, basename = os.path.split(name)
339 rel.append(basename)
340 if dirname == name:
341 break
342 name = dirname
343
344 raise Abort('%s not under root' % myname)
345
346 _hgexecutable = None
298 _hgexecutable = None
347
299
348 def main_is_frozen():
300 def main_is_frozen():
349 """return True if we are a frozen executable.
301 """return True if we are a frozen executable.
350
302
351 The code supports py2exe (most common, Windows only) and tools/freeze
303 The code supports py2exe (most common, Windows only) and tools/freeze
352 (portable, not much used).
304 (portable, not much used).
353 """
305 """
354 return (hasattr(sys, "frozen") or # new py2exe
306 return (hasattr(sys, "frozen") or # new py2exe
355 hasattr(sys, "importers") or # old py2exe
307 hasattr(sys, "importers") or # old py2exe
356 imp.is_frozen("__main__")) # tools/freeze
308 imp.is_frozen("__main__")) # tools/freeze
357
309
358 def hgexecutable():
310 def hgexecutable():
359 """return location of the 'hg' executable.
311 """return location of the 'hg' executable.
360
312
361 Defaults to $HG or 'hg' in the search path.
313 Defaults to $HG or 'hg' in the search path.
362 """
314 """
363 if _hgexecutable is None:
315 if _hgexecutable is None:
364 hg = os.environ.get('HG')
316 hg = os.environ.get('HG')
365 if hg:
317 if hg:
366 set_hgexecutable(hg)
318 set_hgexecutable(hg)
367 elif main_is_frozen():
319 elif main_is_frozen():
368 set_hgexecutable(sys.executable)
320 set_hgexecutable(sys.executable)
369 else:
321 else:
370 exe = find_exe('hg') or os.path.basename(sys.argv[0])
322 exe = find_exe('hg') or os.path.basename(sys.argv[0])
371 set_hgexecutable(exe)
323 set_hgexecutable(exe)
372 return _hgexecutable
324 return _hgexecutable
373
325
374 def set_hgexecutable(path):
326 def set_hgexecutable(path):
375 """set location of the 'hg' executable"""
327 """set location of the 'hg' executable"""
376 global _hgexecutable
328 global _hgexecutable
377 _hgexecutable = path
329 _hgexecutable = path
378
330
379 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
331 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
380 '''enhanced shell command execution.
332 '''enhanced shell command execution.
381 run with environment maybe modified, maybe in different dir.
333 run with environment maybe modified, maybe in different dir.
382
334
383 if command fails and onerr is None, return status. if ui object,
335 if command fails and onerr is None, return status. if ui object,
384 print error message and return status, else raise onerr object as
336 print error message and return status, else raise onerr object as
385 exception.
337 exception.
386
338
387 if out is specified, it is assumed to be a file-like object that has a
339 if out is specified, it is assumed to be a file-like object that has a
388 write() method. stdout and stderr will be redirected to out.'''
340 write() method. stdout and stderr will be redirected to out.'''
389 try:
341 try:
390 sys.stdout.flush()
342 sys.stdout.flush()
391 except Exception:
343 except Exception:
392 pass
344 pass
393 def py2shell(val):
345 def py2shell(val):
394 'convert python object into string that is useful to shell'
346 'convert python object into string that is useful to shell'
395 if val is None or val is False:
347 if val is None or val is False:
396 return '0'
348 return '0'
397 if val is True:
349 if val is True:
398 return '1'
350 return '1'
399 return str(val)
351 return str(val)
400 origcmd = cmd
352 origcmd = cmd
401 cmd = quotecommand(cmd)
353 cmd = quotecommand(cmd)
402 env = dict(os.environ)
354 env = dict(os.environ)
403 env.update((k, py2shell(v)) for k, v in environ.iteritems())
355 env.update((k, py2shell(v)) for k, v in environ.iteritems())
404 env['HG'] = hgexecutable()
356 env['HG'] = hgexecutable()
405 if out is None:
357 if out is None:
406 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
358 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
407 env=env, cwd=cwd)
359 env=env, cwd=cwd)
408 else:
360 else:
409 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
361 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
410 env=env, cwd=cwd, stdout=subprocess.PIPE,
362 env=env, cwd=cwd, stdout=subprocess.PIPE,
411 stderr=subprocess.STDOUT)
363 stderr=subprocess.STDOUT)
412 for line in proc.stdout:
364 for line in proc.stdout:
413 out.write(line)
365 out.write(line)
414 proc.wait()
366 proc.wait()
415 rc = proc.returncode
367 rc = proc.returncode
416 if sys.platform == 'OpenVMS' and rc & 1:
368 if sys.platform == 'OpenVMS' and rc & 1:
417 rc = 0
369 rc = 0
418 if rc and onerr:
370 if rc and onerr:
419 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
371 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
420 explain_exit(rc)[0])
372 explain_exit(rc)[0])
421 if errprefix:
373 if errprefix:
422 errmsg = '%s: %s' % (errprefix, errmsg)
374 errmsg = '%s: %s' % (errprefix, errmsg)
423 try:
375 try:
424 onerr.warn(errmsg + '\n')
376 onerr.warn(errmsg + '\n')
425 except AttributeError:
377 except AttributeError:
426 raise onerr(errmsg)
378 raise onerr(errmsg)
427 return rc
379 return rc
428
380
429 def checksignature(func):
381 def checksignature(func):
430 '''wrap a function with code to check for calling errors'''
382 '''wrap a function with code to check for calling errors'''
431 def check(*args, **kwargs):
383 def check(*args, **kwargs):
432 try:
384 try:
433 return func(*args, **kwargs)
385 return func(*args, **kwargs)
434 except TypeError:
386 except TypeError:
435 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
387 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
436 raise error.SignatureError
388 raise error.SignatureError
437 raise
389 raise
438
390
439 return check
391 return check
440
392
441 def makedir(path, notindexed):
393 def makedir(path, notindexed):
442 os.mkdir(path)
394 os.mkdir(path)
443
395
444 def unlinkpath(f):
396 def unlinkpath(f):
445 """unlink and remove the directory if it is empty"""
397 """unlink and remove the directory if it is empty"""
446 os.unlink(f)
398 os.unlink(f)
447 # try removing directories that might now be empty
399 # try removing directories that might now be empty
448 try:
400 try:
449 os.removedirs(os.path.dirname(f))
401 os.removedirs(os.path.dirname(f))
450 except OSError:
402 except OSError:
451 pass
403 pass
452
404
453 def copyfile(src, dest):
405 def copyfile(src, dest):
454 "copy a file, preserving mode and atime/mtime"
406 "copy a file, preserving mode and atime/mtime"
455 if os.path.islink(src):
407 if os.path.islink(src):
456 try:
408 try:
457 os.unlink(dest)
409 os.unlink(dest)
458 except:
410 except:
459 pass
411 pass
460 os.symlink(os.readlink(src), dest)
412 os.symlink(os.readlink(src), dest)
461 else:
413 else:
462 try:
414 try:
463 shutil.copyfile(src, dest)
415 shutil.copyfile(src, dest)
464 shutil.copymode(src, dest)
416 shutil.copymode(src, dest)
465 except shutil.Error, inst:
417 except shutil.Error, inst:
466 raise Abort(str(inst))
418 raise Abort(str(inst))
467
419
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree, hardlinking individual files when possible.

    Returns a (hardlink, count) pair: the possibly-downgraded hardlink
    flag and the number of files copied.
    """

    if hardlink is None:
        # Hardlinks only work within a single filesystem, so probe the
        # device of the source against the destination's parent.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    copied = 0
    if not os.path.isdir(src):
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                # Linking failed: fall back to plain copies from now on.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        copied = 1
    else:
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            child_src = os.path.join(src, name)
            child_dst = os.path.join(dst, name)
            # Propagate the flag: one link failure disables hardlinking
            # for the remainder of the tree.
            hardlink, n = copyfiles(child_src, child_dst, hardlink)
            copied += n

    return hardlink, copied
495
447
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # CR and LF cannot be represented in Mercurial's line-based
    # metadata formats, so reject them outright.
    for banned in ('\r', '\n'):
        if banned in f:
            raise Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
500
452
501 _windows_reserved_filenames = '''con prn aux nul
453 _windows_reserved_filenames = '''con prn aux nul
502 com1 com2 com3 com4 com5 com6 com7 com8 com9
454 com1 com2 com3 com4 com5 com6 com7 com8 com9
503 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
455 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
504 _windows_reserved_chars = ':*?"<>|'
456 _windows_reserved_chars = ':*?"<>|'
505 def checkwinfilename(path):
457 def checkwinfilename(path):
506 '''Check that the base-relative path is a valid filename on Windows.
458 '''Check that the base-relative path is a valid filename on Windows.
507 Returns None if the path is ok, or a UI string describing the problem.
459 Returns None if the path is ok, or a UI string describing the problem.
508
460
509 >>> checkwinfilename("just/a/normal/path")
461 >>> checkwinfilename("just/a/normal/path")
510 >>> checkwinfilename("foo/bar/con.xml")
462 >>> checkwinfilename("foo/bar/con.xml")
511 "filename contains 'con', which is reserved on Windows"
463 "filename contains 'con', which is reserved on Windows"
512 >>> checkwinfilename("foo/con.xml/bar")
464 >>> checkwinfilename("foo/con.xml/bar")
513 "filename contains 'con', which is reserved on Windows"
465 "filename contains 'con', which is reserved on Windows"
514 >>> checkwinfilename("foo/bar/xml.con")
466 >>> checkwinfilename("foo/bar/xml.con")
515 >>> checkwinfilename("foo/bar/AUX/bla.txt")
467 >>> checkwinfilename("foo/bar/AUX/bla.txt")
516 "filename contains 'AUX', which is reserved on Windows"
468 "filename contains 'AUX', which is reserved on Windows"
517 >>> checkwinfilename("foo/bar/bla:.txt")
469 >>> checkwinfilename("foo/bar/bla:.txt")
518 "filename contains ':', which is reserved on Windows"
470 "filename contains ':', which is reserved on Windows"
519 >>> checkwinfilename("foo/bar/b\07la.txt")
471 >>> checkwinfilename("foo/bar/b\07la.txt")
520 "filename contains '\\\\x07', which is invalid on Windows"
472 "filename contains '\\\\x07', which is invalid on Windows"
521 >>> checkwinfilename("foo/bar/bla ")
473 >>> checkwinfilename("foo/bar/bla ")
522 "filename ends with ' ', which is not allowed on Windows"
474 "filename ends with ' ', which is not allowed on Windows"
523 '''
475 '''
524 for n in path.replace('\\', '/').split('/'):
476 for n in path.replace('\\', '/').split('/'):
525 if not n:
477 if not n:
526 continue
478 continue
527 for c in n:
479 for c in n:
528 if c in _windows_reserved_chars:
480 if c in _windows_reserved_chars:
529 return _("filename contains '%s', which is reserved "
481 return _("filename contains '%s', which is reserved "
530 "on Windows") % c
482 "on Windows") % c
531 if ord(c) <= 31:
483 if ord(c) <= 31:
532 return _("filename contains %r, which is invalid "
484 return _("filename contains %r, which is invalid "
533 "on Windows") % c
485 "on Windows") % c
534 base = n.split('.')[0]
486 base = n.split('.')[0]
535 if base and base.lower() in _windows_reserved_filenames:
487 if base and base.lower() in _windows_reserved_filenames:
536 return _("filename contains '%s', which is reserved "
488 return _("filename contains '%s', which is reserved "
537 "on Windows") % base
489 "on Windows") % base
538 t = n[-1]
490 t = n[-1]
539 if t in '. ':
491 if t in '. ':
540 return _("filename ends with '%s', which is not allowed "
492 return _("filename ends with '%s', which is not allowed "
541 "on Windows") % t
493 "on Windows") % t
542
494
class path_auditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - ends with a directory separator
    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        # Paths/directories already vetted, so repeat checks are cheap.
        self.audited = set()
        self.auditeddir = set()
        # Repository root that all audited paths are relative to.
        self.root = root
        # Optional approval hook for nested repositories (subrepos).
        self.callback = callback

    def __call__(self, path):
        '''Check the relative path.
        path may contain a pattern (e.g. foodir/**.txt)'''

        if path in self.audited:
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if endswithsep(path):
            raise Abort(_("path ends in directory separator: %s") % path)
        normpath = os.path.normcase(path)
        parts = splitpath(normpath)
        # Reject drive-absolute paths, a leading .hg component (or an
        # empty first component, i.e. an absolute path), and "..".
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise Abort(_("path contains illegal component: %s") % path)
        # Cheap substring test first; only then scan components for a
        # .hg appearing below the top level (a nested repository).
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise Abort(_('path %r is inside nested repo %r')
                                % (path, base))

        # Walk ancestor prefixes from deepest to shallowest, stopping
        # at the first one already known good.
        parts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    # A nested repo is only allowed if the callback
                    # explicitly approves it.
                    if not self.callback or not self.callback(curpath):
                        raise Abort(_('path %r is inside nested repo %r') %
                                    (path, prefix))
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
567 self.auditeddir.update(prefixes)
616
568
def lookup_reg(key, name=None, scope=None):
    """Registry lookup stub: always returns None (presumably replaced by
    a real implementation on Windows -- confirm against the platform
    import below)."""
    return None
619
571
def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems, so this
    default implementation does nothing.
    """
627
579
# Pull in the platform-specific implementations (presumably posixfile,
# rename, os_link, nlinks, umask, ... used throughout this module --
# confirm against windows.py/posix.py). On Windows, the Windows filename
# check above is also installed as the OS-level filename check.
if os.name == 'nt':
    checkosfilename = checkwinfilename
    from windows import *
else:
    from posix import *
633
585
def makelock(info, pathname):
    """Create a lock file at pathname whose content is info.

    Preferred form: a symlink whose target is the info string, created
    atomically. Falls back to an exclusively-created regular file when
    symlinks are unavailable.
    """
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        # An already-existing lock is an error; any other OSError
        # (e.g. filesystem without symlink support) falls through to
        # the plain-file fallback below.
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # Fallback: O_EXCL creation guarantees we do not clobber an
    # existing lock file.
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
646
598
def readlock(pathname):
    """Return the content of the lock at pathname.

    Mirrors makelock(): first try to read it as a symlink target,
    then fall back to reading it as a regular file.
    """
    try:
        return os.readlink(pathname)
    except OSError, why:
        # EINVAL: not a symlink; ENOSYS: readlink unsupported. Both
        # mean we should try the regular-file form instead.
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
659
611
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    # Objects without a file descriptor (AttributeError on .fileno)
    # are stat'ed by name instead.
    try:
        fd = fp.fileno()
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fd)
666
618
667 # File system features
619 # File system features
668
620
def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    # Build a case-folded sibling name; if the name is already all
    # upper-case, fold the other way.
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            # Folded name resolves to the same file: case-insensitive.
            return False
        return True
    except OSError:
        # The folded sibling does not exist, so the filesystem is
        # case-sensitive. (Previously a bare "except:", which also
        # swallowed KeyboardInterrupt/SystemExit.)
        return True
688
640
# Cache of directory listings keyed by normalized directory path, used
# by fspath() to avoid repeated os.listdir() calls.
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    Returns None if the path does not exist.
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        # Slice (not index) so that name == root does not raise
        # IndexError; '' never equals os.sep/os.altsep.
        if name[l:l + 1] in (os.sep, os.altsep):
            l = l + 1
        name = name[l:]

    if not os.path.lexists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes for the regex character classes below.
    # BUG FIX: str.replace() returns a new string -- the old code
    # discarded the result, so the Windows '\\' separator was never
    # escaped and '[^%s]' matched backslashes as name characters.
    seps = seps.replace('\\', '\\\\')
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        # Find the directory entry that matches this component
        # case-insensitively and emit it in its on-disk case.
        lpart = part.lower()
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)
736
688
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        # A leftover probe file means we cannot run the check safely.
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            # os_link comes from the platform import (windows/posix).
            os_link(f1, f2)
        except OSError:
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        if fd is not None:
            fd.close()
        # Best-effort cleanup of both probe files.
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass

    # Not reachable: every path through the try block above returns.
    return False
772
724
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    # NOTE: deliberately preserves the short-circuit of the original
    # boolean chain -- on platforms without altsep a non-separator
    # ending yields None (falsy) rather than False.
    alt = os.altsep
    return path.endswith(os.sep) or alt and path.endswith(alt)
776
728
def splitpath(path):
    '''Split path on os.sep only.

    os.altsep is deliberately ignored: this is just shorthand for
    path.split(os.sep). Run the path through os.path.normpath() first
    if normalization is needed.'''
    return path.split(os.sep)
784
736
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # Windows always has a GUI; elsewhere X11 is detected via DISPLAY.
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
799
751
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # Create the temp file in the same directory as name so a later
    # rename over name stays on one filesystem.
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    try:
        st_mode = os.lstat(name).st_mode & 0777
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise
        # Original absent: fall back to createmode, then to the
        # process umask (umask comes from the platform import).
        st_mode = createmode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0666
    os.chmod(temp, st_mode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            # A missing original just means an empty copy.
            if inst.errno == errno.ENOENT:
                return temp
            # Attach the filename for a more useful error message.
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        # Clean up the temp file on any failure, then re-raise.
        try: os.unlink(temp)
        except: pass
        raise
    return temp
847
799
class atomictempfile(object):
    """file-like object that atomically updates a file

    All writes will be redirected to a temporary copy of the original
    file. When rename is called, the copy is renamed to the original
    name, making the changes visible.
    """
    def __init__(self, name, mode='w+b', createmode=None):
        # Final destination path (name-mangled to survive __getattr__).
        self.__name = name
        # Set _fp early so __del__/close work even if mktempcopy fails.
        self._fp = None
        self.temp = mktempcopy(name, emptyok=('w' in mode),
                               createmode=createmode)
        self._fp = posixfile(self.temp, mode)

    def __getattr__(self, name):
        # Delegate all other file operations (write, seek, ...) to the
        # underlying temp file object.
        return getattr(self._fp, name)

    def rename(self):
        # Commit: close the temp file and atomically move it over the
        # original (rename/localpath come from the platform import).
        if not self._fp.closed:
            self._fp.close()
        rename(self.temp, localpath(self.__name))

    def close(self):
        # Abort: discard the temp file without touching the original.
        if not self._fp:
            return
        if not self._fp.closed:
            try:
                os.unlink(self.temp)
            except: pass
            self._fp.close()

    def __del__(self):
        # An unrenamed tempfile at GC time means the update is aborted.
        self.close()
881
833
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    parent = os.path.abspath(os.path.dirname(name))
    try:
        os.mkdir(name)
        if mode is not None:
            os.chmod(name, mode)
        return
    except OSError, err:
        # Already existing is fine (idempotent).
        if err.errno == errno.EEXIST:
            return
        # Only a missing parent (ENOENT) is recoverable; also stop if
        # we have hit the filesystem root (parent == name).
        if not name or parent == name or err.errno != errno.ENOENT:
            raise
    # Create the missing ancestors, then retry this directory.
    makedirs(parent, mode)
    makedirs(name, mode)
897
849
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter yields the input chunks. Oversized chunks (over 1M)
        are resliced into 256k pieces so no single element dominates
        memory."""
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) <= 2**20:
                    yield chunk
                else:
                    offset = 0
                    while offset < len(chunk):
                        yield chunk[offset:offset + 2**18]
                        offset += 2**18
        self.iter = splitbig(in_iter)
        # Chunks pulled from the iterator but not yet consumed.
        self._queue = []

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        pieces = []
        remaining = l
        queue = self._queue
        while remaining > 0:
            if not queue:
                # Refill: pull roughly 256k ahead from the source.
                budget = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not queue:
                    break  # source exhausted: short read
            chunk = queue.pop(0)
            remaining -= len(chunk)
            if remaining < 0:
                # Chunk overshoots: push the surplus back for next time.
                queue.insert(0, chunk[remaining:])
                pieces.append(chunk[:remaining])
            else:
                pieces.append(chunk)

        return ''.join(pieces)
945
897
def filechunkiter(f, size=65536, limit=None):
    """Yield the data in file object f in blocks of at most size bytes
    (default 65536), reading no more than limit bytes in total when a
    limit is given. Blocks may come up short at end of file or when
    the underlying object (e.g. a socket) returns less than requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 means the limit is used up: stop without reading.
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
966
918
def makedate():
    """Return the current time as a (unixtime, tzoffset) tuple.

    tzoffset is the local zone's distance from UTC in seconds, with
    DST taken into account. Aborts on clocks set before the epoch.
    """
    lt = time.localtime()
    # lt[8] is tm_isdst: choose the matching zone constant.
    tz = time.altzone if (lt[8] == 1 and time.daylight) else time.timezone
    t = time.mktime(lt)
    if t < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % t, hint=hint)
    return t, tz
978
930
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. "%1" and "%2" in the format expand
    to the offset's hour and minute fields; omit them to leave the time
    zone out of the string."""
    t, tz = date or makedate()
    if t < 0:
        # time.gmtime() fails on Windows for values below -43200.
        t, tz = 0, 0
    if "%1" in format or "%2" in format:
        sign = "-" if tz > 0 else "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    return time.strftime(format, time.gmtime(float(t) - tz))
995
947
def shortdate(date=None):
    """Render a (timestamp, tzoffset) tuple as an ISO 8601 date only."""
    fmt = '%Y-%m-%d'
    return datestr(date, format=fmt)
999
951
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.

    defaults maps field groups (e.g. "HI", "mb") to (zero, now) value
    pairs used to fill in elements missing from the format string.
    If the string cannot be parsed, ValueError is raised.
    """
    # NOTE: previously took defaults=[] (a mutable default that would
    # TypeError when indexed); use the None-sentinel idiom instead.
    if defaults is None:
        defaults = {}

    def timezone(string):
        # the timezone, if any, is the last whitespace-separated token
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            # offset is seconds *behind* UTC, hence the negated sign
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        # strip the recognized timezone token before strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone: derive the offset from mktime vs. timegm
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1040
992
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    bias maps field groups to preferred fill-in values for fields the
    input leaves unspecified (see strdate).
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    # NOTE: previously bias={} (mutable default); bias is only read via
    # .get(), but use the None-sentinel idiom to be safe.
    if bias is None:
        bias = {}
    date = date.strip()
    try:
        # fast path: "unixtime offset"
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        nowmap = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1095
1047
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # bias unspecified fields toward the earliest possible moment
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # bias unspecified fields toward the latest possible moment
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Exception:
                # month too short for this day count -- try the next one.
                # (was a bare except:, which also swallowed SystemExit
                # and KeyboardInterrupt)
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": everything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range "A to B"
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # single date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1171
1123
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop an email domain, if present
    user = user.split('@', 1)[0]
    # keep only what follows a '<' (e.g. "Real Name <login")
    user = user.split('<', 1)[-1]
    # truncate at the first space, then at the first dot
    for sep in (' ', '.'):
        user = user.split(sep, 1)[0]
    return user
1187
1139
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; if either is missing the find() results
    # (-1 + 1 == 0, and None for the end) degrade to "the whole string"
    close = author.find('>')
    end = close if close != -1 else None
    return author[author.find('<') + 1:end]
1194
1146
def _ellipsis(text, maxlength):
    # Short enough already: return unchanged, flag "not truncated".
    if len(text) <= maxlength:
        return text, False
    # Reserve three characters for the trailing "...".
    return text[:maxlength - 3] + "...", True
1200
1152
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    try:
        # decode first so we never cut inside a multi-byte sequence
        utext, truncated = _ellipsis(text.decode(encoding.encoding),
                                     maxlength)
        if truncated:
            return utext.encode(encoding.encoding)
        return text
    except (UnicodeDecodeError, UnicodeEncodeError):
        # undecodable input: fall back to a raw byte-level trim
        return _ellipsis(text, maxlength)[0]
1212
1164
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        # only abort the walk for errors on the root itself
        if err.filename == path:
            raise err
    if followsym and hasattr(os.path, 'samestat'):
        # Track visited directories by stat identity so that symlink
        # cycles do not cause infinite recursion.  Returns True if the
        # directory was not seen before (and records it).
        def _add_dir_if_not_there(dirlst, dirname):
            match = False
            samestat = os.path.samestat
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # platform without samestat (e.g. Windows): cannot safely follow
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        _add_dir_if_not_there(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()  # deterministic traversal order
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if _add_dir_if_not_there(seen_dirs, fname):
                    if os.path.islink(fname):
                        # recurse through the link with cycle tracking
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
1259
1211
# cache for rcpath(); None until the search path is first computed
_rcpath = None
1261
1213
def os_rcpath():
    '''return default os-specific hgrc search path'''
    # system-wide configuration files first, then per-user ones
    paths = system_rcpath()
    paths.extend(user_rcpath())
    return [os.path.normpath(f) for f in paths]
1268
1220
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        # already computed on a previous call
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = os_rcpath()
        return _rcpath
    _rcpath = []
    for entry in os.environ['HGRCPATH'].split(os.pathsep):
        if not entry:
            continue
        entry = expandpath(entry)
        if not os.path.isdir(entry):
            _rcpath.append(entry)
            continue
        # a directory entry contributes all its *.rc files
        for fname, kind in osutil.listdir(entry):
            if fname.endswith('.rc'):
                _rcpath.append(os.path.join(entry, fname))
    return _rcpath
1292
1244
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # rows are ordered largest threshold first; the first row whose
    # threshold (divisor * multiplier) does not exceed nbytes wins
    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for mult, divisor, fmt in units:
        if nbytes >= divisor * mult:
            return fmt % (nbytes / float(divisor))
    # below every threshold: report plain bytes
    return units[-1][2] % nbytes
1313
1265
def uirepr(s):
    """repr() of s with doubled backslashes (Windows paths) collapsed."""
    r = repr(s)
    # Avoid double backslash in Windows path repr()
    return r.replace('\\\\', '\\')
1317
1269
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for double-width characters.

        Some Asian characters use two terminal columns instead of one.
        A good example of this behavior can be seen with u'\u65e5\u672c',
        the two Japanese characters for "Japan":
        len() returns 2, but when printed to a terminal, they eat 4 columns.

        (Note that this has nothing to do whatsoever with unicode
        representation, or encoding of the underlying string)
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

        def _cutdown(self, str, space_left):
            # Split 'str' so the head fits within space_left terminal
            # columns, counting East Asian Wide/Fullwidth/Ambiguous
            # ('WFA') characters as two columns each.
            l = 0
            ucstr = unicode(str, encoding.encoding)
            colwidth = unicodedata.east_asian_width
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i]) in 'WFA' and 2 or 1
                if space_left < l:
                    return (ucstr[:i].encode(encoding.encoding),
                            ucstr[i:].encode(encoding.encoding))
            return str, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                # break by terminal-column width instead of character count
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

    # replace this factory with the class itself after first use,
    # so the textwrap import cost is paid only once
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1360
1312
def wrap(line, width, initindent='', hangindent=''):
    """Fill *line* to *width* columns with the given first-line and
    hanging indents, using the width-aware MBTextWrapper."""
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    tw = MBTextWrapper(width=width,
                       initial_indent=initindent,
                       subsequent_indent=hangindent)
    return tw.fill(line)
1370
1322
def iterlines(iterator):
    """Flatten an iterable of text chunks into a stream of lines."""
    for piece in iterator:
        # splitlines() drops the terminators, so callers see bare lines
        for ln in piece.splitlines():
            yield ln
1375
1327
def expandpath(path):
    """Expand environment variables and a leading ~ in *path*."""
    # expandvars runs first so a variable's value may itself contain ~
    return os.path.expanduser(os.path.expandvars(path))
1378
1330
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not main_is_frozen():
        return gethgcmd()
    # frozen (bundled-interpreter) build: the executable itself is hg
    return [sys.executable]
1389
1341
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and remember its (pid, status)
        terminated.add(os.wait())
    prevhandler = None
    if hasattr(signal, 'SIGCHLD'):
        # install the handler *before* spawning to avoid missing the signal
        prevhandler = signal.signal(signal.SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after seeing the child die: it may have
            # satisfied the condition just before exiting
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1423
1375
try:
    # Python >= 2.5: just re-export the builtins at module level
    any, all = any, all
except NameError:
    # older interpreters: provide equivalent pure-Python fallbacks
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False

    def all(iterable):
        for element in iterable:
            if not element:
                return False
        return True
1438
1390
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # strip the regex-escaping backslash to get the literal char
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # work on a copy: previously this mutated the caller's mapping,
        # leaking the prefix-escape entry back to the caller
        mapping = dict(mapping)
        mapping[prefix_char] = prefix_char
    r = re.compile(r'%s(%s)' % (prefix, patterns))
    # each match is prefix + key; strip the prefix and look up the key
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1463
1415
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1480
1432
# recognized spellings for boolean config values, lower-cased
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
1484
1436
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # lookup is case-insensitive; unrecognized spellings yield None
    key = s.lower()
    return _booleans.get(key, None)
General Comments 0
You need to be logged in to leave comments. Login now