@@ -1,479 +1,479 b'' | |||||
1 | # ASCII graph log extension for Mercurial |
|
1 | # ASCII graph log extension for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2007 Joel Rosdahl <joel@rosdahl.net> |
|
3 | # Copyright 2007 Joel Rosdahl <joel@rosdahl.net> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of |
|
5 | # This software may be used and distributed according to the terms of | |
6 | # the GNU General Public License, incorporated herein by reference. |
|
6 | # the GNU General Public License, incorporated herein by reference. | |
7 | '''show revision graphs in terminal windows |
|
7 | '''show revision graphs in terminal windows | |
8 |
|
8 | |||
9 | This extension adds a --graph option to the incoming, outgoing and log |
|
9 | This extension adds a --graph option to the incoming, outgoing and log | |
10 | commands. When this option is given, an ascii representation of the |

10 | commands. When this option is given, an ascii representation of the | |
11 | revision graph is also shown. |
|
11 | revision graph is also shown. | |
12 | ''' |
|
12 | ''' | |
13 |
|
13 | |||
14 | import os |
|
14 | import os | |
15 | import sys |
|
15 | import sys | |
16 | from mercurial.cmdutil import revrange, show_changeset |
|
16 | from mercurial.cmdutil import revrange, show_changeset | |
17 | from mercurial.commands import templateopts, logopts, remoteopts |
|
17 | from mercurial.commands import templateopts, logopts, remoteopts | |
18 | from mercurial.i18n import _ |
|
18 | from mercurial.i18n import _ | |
19 | from mercurial.node import nullrev |
|
19 | from mercurial.node import nullrev | |
20 | from mercurial.util import Abort, canonpath |
|
20 | from mercurial.util import Abort, canonpath | |
21 | from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions |
|
21 | from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions | |
22 | from mercurial import hg, ui, url |
|
22 | from mercurial import hg, ui, url | |
23 |
|
23 | |||
24 | def revisions(repo, start, stop): |
|
24 | def revisions(repo, start, stop): | |
25 | """cset DAG generator yielding (rev, node, [parents]) tuples |
|
25 | """cset DAG generator yielding (rev, node, [parents]) tuples | |
26 |
|
26 | |||
27 | This generator function walks through the revision history from revision |
|
27 | This generator function walks through the revision history from revision | |
28 | start to revision stop (which must be less than or equal to start). |
|
28 | start to revision stop (which must be less than or equal to start). | |
29 | """ |
|
29 | """ | |
30 | assert start >= stop |
|
30 | assert start >= stop | |
31 | cur = start |
|
31 | cur = start | |
32 | while cur >= stop: |
|
32 | while cur >= stop: | |
33 | ctx = repo[cur] |
|
33 | ctx = repo[cur] | |
34 | parents = [p.rev() for p in ctx.parents() if p.rev() != nullrev] |
|
34 | parents = [p.rev() for p in ctx.parents() if p.rev() != nullrev] | |
35 | parents.sort() |
|
35 | parents.sort() | |
36 | yield (ctx, parents) |
|
36 | yield (ctx, parents) | |
37 | cur -= 1 |
|
37 | cur -= 1 | |
38 |
|
38 | |||
39 | def filerevs(repo, path, start, stop): |
|
39 | def filerevs(repo, path, start, stop): | |
40 | """file cset DAG generator yielding (rev, node, [parents]) tuples |
|
40 | """file cset DAG generator yielding (rev, node, [parents]) tuples | |
41 |
|
41 | |||
42 | This generator function walks through the revision history of a single |
|
42 | This generator function walks through the revision history of a single | |
43 | file from revision start to revision stop (which must be less than or |
|
43 | file from revision start to revision stop (which must be less than or | |
44 | equal to start). |
|
44 | equal to start). | |
45 | """ |
|
45 | """ | |
46 | assert start >= stop |
|
46 | assert start >= stop | |
47 | filerev = len(repo.file(path)) - 1 |
|
47 | filerev = len(repo.file(path)) - 1 | |
48 | while filerev >= 0: |
|
48 | while filerev >= 0: | |
49 | fctx = repo.filectx(path, fileid=filerev) |
|
49 | fctx = repo.filectx(path, fileid=filerev) | |
50 | parents = [f.linkrev() for f in fctx.parents() if f.path() == path] |
|
50 | parents = [f.linkrev() for f in fctx.parents() if f.path() == path] | |
51 | parents.sort() |
|
51 | parents.sort() | |
52 | if fctx.rev() <= start: |
|
52 | if fctx.rev() <= start: | |
53 | yield (fctx, parents) |
|
53 | yield (fctx, parents) | |
54 | if fctx.rev() <= stop: |
|
54 | if fctx.rev() <= stop: | |
55 | break |
|
55 | break | |
56 | filerev -= 1 |
|
56 | filerev -= 1 | |
57 |
|
57 | |||
58 | def grapher(nodes): |
|
58 | def grapher(nodes): | |
59 | """grapher for asciigraph on a list of nodes and their parents |
|
59 | """grapher for asciigraph on a list of nodes and their parents | |
60 |
|
60 | |||
61 | nodes must generate tuples (node, parents, char, lines) where |
|
61 | nodes must generate tuples (node, parents, char, lines) where | |
62 | - parents must generate the parents of node, in sorted order, |
|
62 | - parents must generate the parents of node, in sorted order, | |
63 | and max length 2, |
|
63 | and max length 2, | |
64 | - char is the char to print as the node symbol, and |
|
64 | - char is the char to print as the node symbol, and | |
65 | - lines are the lines to display next to the node. |
|
65 | - lines are the lines to display next to the node. | |
66 | """ |
|
66 | """ | |
67 | seen = [] |
|
67 | seen = [] | |
68 | for node, parents, char, lines in nodes: |
|
68 | for node, parents, char, lines in nodes: | |
69 | if node not in seen: |
|
69 | if node not in seen: | |
70 | seen.append(node) |
|
70 | seen.append(node) | |
71 | nodeidx = seen.index(node) |
|
71 | nodeidx = seen.index(node) | |
72 |
|
72 | |||
73 | knownparents = [] |
|
73 | knownparents = [] | |
74 | newparents = [] |
|
74 | newparents = [] | |
75 | for parent in parents: |
|
75 | for parent in parents: | |
76 | if parent in seen: |
|
76 | if parent in seen: | |
77 | knownparents.append(parent) |
|
77 | knownparents.append(parent) | |
78 | else: |
|
78 | else: | |
79 | newparents.append(parent) |
|
79 | newparents.append(parent) | |
80 |
|
80 | |||
81 | ncols = len(seen) |
|
81 | ncols = len(seen) | |
82 | nextseen = seen[:] |
|
82 | nextseen = seen[:] | |
83 | nextseen[nodeidx:nodeidx + 1] = newparents |
|
83 | nextseen[nodeidx:nodeidx + 1] = newparents | |
84 | edges = [(nodeidx, nextseen.index(p)) for p in knownparents] |
|
84 | edges = [(nodeidx, nextseen.index(p)) for p in knownparents] | |
85 |
|
85 | |||
86 | if len(newparents) > 0: |
|
86 | if len(newparents) > 0: | |
87 | edges.append((nodeidx, nodeidx)) |
|
87 | edges.append((nodeidx, nodeidx)) | |
88 | if len(newparents) > 1: |
|
88 | if len(newparents) > 1: | |
89 | edges.append((nodeidx, nodeidx + 1)) |
|
89 | edges.append((nodeidx, nodeidx + 1)) | |
90 | nmorecols = len(nextseen) - ncols |
|
90 | nmorecols = len(nextseen) - ncols | |
91 | seen = nextseen |
|
91 | seen = nextseen | |
92 | yield (char, lines, nodeidx, edges, ncols, nmorecols) |
|
92 | yield (char, lines, nodeidx, edges, ncols, nmorecols) | |
93 |
|
93 | |||
94 | def fix_long_right_edges(edges): |
|
94 | def fix_long_right_edges(edges): | |
95 | for (i, (start, end)) in enumerate(edges): |
|
95 | for (i, (start, end)) in enumerate(edges): | |
96 | if end > start: |
|
96 | if end > start: | |
97 | edges[i] = (start, end + 1) |
|
97 | edges[i] = (start, end + 1) | |
98 |
|
98 | |||
99 | def get_nodeline_edges_tail( |
|
99 | def get_nodeline_edges_tail( | |
100 | node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail): |
|
100 | node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail): | |
101 | if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0: |
|
101 | if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0: | |
102 | # Still going in the same non-vertical direction. |
|
102 | # Still going in the same non-vertical direction. | |
103 | if n_columns_diff == -1: |
|
103 | if n_columns_diff == -1: | |
104 | start = max(node_index + 1, p_node_index) |
|
104 | start = max(node_index + 1, p_node_index) | |
105 | tail = ["|", " "] * (start - node_index - 1) |
|
105 | tail = ["|", " "] * (start - node_index - 1) | |
106 | tail.extend(["/", " "] * (n_columns - start)) |
|
106 | tail.extend(["/", " "] * (n_columns - start)) | |
107 | return tail |
|
107 | return tail | |
108 | else: |
|
108 | else: | |
109 | return ["\\", " "] * (n_columns - node_index - 1) |
|
109 | return ["\\", " "] * (n_columns - node_index - 1) | |
110 | else: |
|
110 | else: | |
111 | return ["|", " "] * (n_columns - node_index - 1) |
|
111 | return ["|", " "] * (n_columns - node_index - 1) | |
112 |
|
112 | |||
113 | def draw_edges(edges, nodeline, interline): |
|
113 | def draw_edges(edges, nodeline, interline): | |
114 | for (start, end) in edges: |
|
114 | for (start, end) in edges: | |
115 | if start == end + 1: |
|
115 | if start == end + 1: | |
116 | interline[2 * end + 1] = "/" |
|
116 | interline[2 * end + 1] = "/" | |
117 | elif start == end - 1: |
|
117 | elif start == end - 1: | |
118 | interline[2 * start + 1] = "\\" |
|
118 | interline[2 * start + 1] = "\\" | |
119 | elif start == end: |
|
119 | elif start == end: | |
120 | interline[2 * start] = "|" |
|
120 | interline[2 * start] = "|" | |
121 | else: |
|
121 | else: | |
122 | nodeline[2 * end] = "+" |
|
122 | nodeline[2 * end] = "+" | |
123 | if start > end: |
|
123 | if start > end: | |
124 | (start, end) = (end,start) |
|
124 | (start, end) = (end,start) | |
125 | for i in range(2 * start + 1, 2 * end): |
|
125 | for i in range(2 * start + 1, 2 * end): | |
126 | if nodeline[i] != "+": |
|
126 | if nodeline[i] != "+": | |
127 | nodeline[i] = "-" |
|
127 | nodeline[i] = "-" | |
128 |
|
128 | |||
129 | def get_padding_line(ni, n_columns, edges): |
|
129 | def get_padding_line(ni, n_columns, edges): | |
130 | line = [] |
|
130 | line = [] | |
131 | line.extend(["|", " "] * ni) |
|
131 | line.extend(["|", " "] * ni) | |
132 | if (ni, ni - 1) in edges or (ni, ni) in edges: |
|
132 | if (ni, ni - 1) in edges or (ni, ni) in edges: | |
133 | # (ni, ni - 1) (ni, ni) |
|
133 | # (ni, ni - 1) (ni, ni) | |
134 | # | | | | | | | | |
|
134 | # | | | | | | | | | |
135 | # +---o | | o---+ |
|
135 | # +---o | | o---+ | |
136 | # | | c | | c | | |
|
136 | # | | c | | c | | | |
137 | # | |/ / | |/ / |
|
137 | # | |/ / | |/ / | |
138 | # | | | | | | |
|
138 | # | | | | | | | |
139 | c = "|" |
|
139 | c = "|" | |
140 | else: |
|
140 | else: | |
141 | c = " " |
|
141 | c = " " | |
142 | line.extend([c, " "]) |
|
142 | line.extend([c, " "]) | |
143 | line.extend(["|", " "] * (n_columns - ni - 1)) |
|
143 | line.extend(["|", " "] * (n_columns - ni - 1)) | |
144 | return line |
|
144 | return line | |
145 |
|
145 | |||
146 | def ascii(ui, grapher): |
|
146 | def ascii(ui, grapher): | |
147 | """prints an ASCII graph of the DAG returned by the grapher |
|
147 | """prints an ASCII graph of the DAG returned by the grapher | |
148 |
|
148 | |||
149 | grapher is a generator that emits tuples with the following elements: |
|
149 | grapher is a generator that emits tuples with the following elements: | |
150 |
|
150 | |||
151 | - Character to use as node's symbol. |
|
151 | - Character to use as node's symbol. | |
152 | - List of lines to display as the node's text. |
|
152 | - List of lines to display as the node's text. | |
153 | - Column of the current node in the set of ongoing edges. |
|
153 | - Column of the current node in the set of ongoing edges. | |
154 | - Edges; a list of (col, next_col) indicating the edges between |
|
154 | - Edges; a list of (col, next_col) indicating the edges between | |
155 | the current node and its parents. |
|
155 | the current node and its parents. | |
156 | - Number of columns (ongoing edges) in the current revision. |
|
156 | - Number of columns (ongoing edges) in the current revision. | |
157 | - The difference between the number of columns (ongoing edges) |
|
157 | - The difference between the number of columns (ongoing edges) | |
158 | in the next revision and the number of columns (ongoing edges) |
|
158 | in the next revision and the number of columns (ongoing edges) | |
159 | in the current revision. That is: -1 means one column removed; |
|
159 | in the current revision. That is: -1 means one column removed; | |
160 | 0 means no columns added or removed; 1 means one column added. |
|
160 | 0 means no columns added or removed; 1 means one column added. | |
161 | """ |
|
161 | """ | |
162 | prev_n_columns_diff = 0 |
|
162 | prev_n_columns_diff = 0 | |
163 | prev_node_index = 0 |
|
163 | prev_node_index = 0 | |
164 | for (node_ch, node_lines, node_index, edges, n_columns, n_columns_diff) in grapher: |
|
164 | for (node_ch, node_lines, node_index, edges, n_columns, n_columns_diff) in grapher: | |
165 |
|
165 | |||
166 | assert -2 < n_columns_diff < 2 |
|
166 | assert -2 < n_columns_diff < 2 | |
167 | if n_columns_diff == -1: |
|
167 | if n_columns_diff == -1: | |
168 | # Transform |
|
168 | # Transform | |
169 | # |
|
169 | # | |
170 | # | | | | | | |
|
170 | # | | | | | | | |
171 | # o | | into o---+ |
|
171 | # o | | into o---+ | |
172 | # |X / |/ / |
|
172 | # |X / |/ / | |
173 | # | | | | |
|
173 | # | | | | | |
174 | fix_long_right_edges(edges) |
|
174 | fix_long_right_edges(edges) | |
175 |
|
175 | |||
176 | # add_padding_line says whether to rewrite |
|
176 | # add_padding_line says whether to rewrite | |
177 | # |
|
177 | # | |
178 | # | | | | | | | | |
|
178 | # | | | | | | | | | |
179 | # | o---+ into | o---+ |
|
179 | # | o---+ into | o---+ | |
180 | # | / / | | | # <--- padding line |
|
180 | # | / / | | | # <--- padding line | |
181 | # o | | | / / |
|
181 | # o | | | / / | |
182 | # o | | |
|
182 | # o | | | |
183 | add_padding_line = (len(node_lines) > 2 and |
|
183 | add_padding_line = (len(node_lines) > 2 and | |
184 | n_columns_diff == -1 and |
|
184 | n_columns_diff == -1 and | |
185 | [x for (x, y) in edges if x + 1 < y]) |
|
185 | [x for (x, y) in edges if x + 1 < y]) | |
186 |
|
186 | |||
187 | # fix_nodeline_tail says whether to rewrite |
|
187 | # fix_nodeline_tail says whether to rewrite | |
188 | # |
|
188 | # | |
189 | # | | o | | | | o | | |
|
189 | # | | o | | | | o | | | |
190 | # | | |/ / | | |/ / |
|
190 | # | | |/ / | | |/ / | |
191 | # | o | | into | o / / # <--- fixed nodeline tail |
|
191 | # | o | | into | o / / # <--- fixed nodeline tail | |
192 | # | |/ / | |/ / |
|
192 | # | |/ / | |/ / | |
193 | # o | | o | | |
|
193 | # o | | o | | | |
194 | fix_nodeline_tail = len(node_lines) <= 2 and not add_padding_line |
|
194 | fix_nodeline_tail = len(node_lines) <= 2 and not add_padding_line | |
195 |
|
195 | |||
196 | # nodeline is the line containing the node character (typically o) |
|
196 | # nodeline is the line containing the node character (typically o) | |
197 | nodeline = ["|", " "] * node_index |
|
197 | nodeline = ["|", " "] * node_index | |
198 | nodeline.extend([node_ch, " "]) |
|
198 | nodeline.extend([node_ch, " "]) | |
199 |
|
199 | |||
200 | nodeline.extend( |
|
200 | nodeline.extend( | |
201 | get_nodeline_edges_tail( |
|
201 | get_nodeline_edges_tail( | |
202 | node_index, prev_node_index, n_columns, n_columns_diff, |
|
202 | node_index, prev_node_index, n_columns, n_columns_diff, | |
203 | prev_n_columns_diff, fix_nodeline_tail)) |
|
203 | prev_n_columns_diff, fix_nodeline_tail)) | |
204 |
|
204 | |||
205 | # shift_interline is the line containing the non-vertical |
|
205 | # shift_interline is the line containing the non-vertical | |
206 | # edges between this entry and the next |
|
206 | # edges between this entry and the next | |
207 | shift_interline = ["|", " "] * node_index |
|
207 | shift_interline = ["|", " "] * node_index | |
208 | if n_columns_diff == -1: |
|
208 | if n_columns_diff == -1: | |
209 | n_spaces = 1 |
|
209 | n_spaces = 1 | |
210 | edge_ch = "/" |
|
210 | edge_ch = "/" | |
211 | elif n_columns_diff == 0: |
|
211 | elif n_columns_diff == 0: | |
212 | n_spaces = 2 |
|
212 | n_spaces = 2 | |
213 | edge_ch = "|" |
|
213 | edge_ch = "|" | |
214 | else: |
|
214 | else: | |
215 | n_spaces = 3 |
|
215 | n_spaces = 3 | |
216 | edge_ch = "\\" |
|
216 | edge_ch = "\\" | |
217 | shift_interline.extend(n_spaces * [" "]) |
|
217 | shift_interline.extend(n_spaces * [" "]) | |
218 | shift_interline.extend([edge_ch, " "] * (n_columns - node_index - 1)) |
|
218 | shift_interline.extend([edge_ch, " "] * (n_columns - node_index - 1)) | |
219 |
|
219 | |||
220 | # draw edges from the current node to its parents |
|
220 | # draw edges from the current node to its parents | |
221 | draw_edges(edges, nodeline, shift_interline) |
|
221 | draw_edges(edges, nodeline, shift_interline) | |
222 |
|
222 | |||
223 | # lines is the list of all graph lines to print |
|
223 | # lines is the list of all graph lines to print | |
224 | lines = [nodeline] |
|
224 | lines = [nodeline] | |
225 | if add_padding_line: |
|
225 | if add_padding_line: | |
226 | lines.append(get_padding_line(node_index, n_columns, edges)) |
|
226 | lines.append(get_padding_line(node_index, n_columns, edges)) | |
227 | lines.append(shift_interline) |
|
227 | lines.append(shift_interline) | |
228 |
|
228 | |||
229 | # make sure that there are as many graph lines as there are |
|
229 | # make sure that there are as many graph lines as there are | |
230 | # log strings |
|
230 | # log strings | |
231 | while len(node_lines) < len(lines): |
|
231 | while len(node_lines) < len(lines): | |
232 | node_lines.append("") |
|
232 | node_lines.append("") | |
233 | if len(lines) < len(node_lines): |
|
233 | if len(lines) < len(node_lines): | |
234 | extra_interline = ["|", " "] * (n_columns + n_columns_diff) |
|
234 | extra_interline = ["|", " "] * (n_columns + n_columns_diff) | |
235 | while len(lines) < len(node_lines): |
|
235 | while len(lines) < len(node_lines): | |
236 | lines.append(extra_interline) |
|
236 | lines.append(extra_interline) | |
237 |
|
237 | |||
238 | # print lines |
|
238 | # print lines | |
239 | indentation_level = max(n_columns, n_columns + n_columns_diff) |
|
239 | indentation_level = max(n_columns, n_columns + n_columns_diff) | |
240 | for (line, logstr) in zip(lines, node_lines): |
|
240 | for (line, logstr) in zip(lines, node_lines): | |
241 | ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr) |
|
241 | ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr) | |
242 | ui.write(ln.rstrip() + '\n') |
|
242 | ui.write(ln.rstrip() + '\n') | |
243 |
|
243 | |||
244 | # ... and start over |
|
244 | # ... and start over | |
245 | prev_node_index = node_index |
|
245 | prev_node_index = node_index | |
246 | prev_n_columns_diff = n_columns_diff |
|
246 | prev_n_columns_diff = n_columns_diff | |
247 |
|
247 | |||
248 | def get_limit(limit_opt): |
|
248 | def get_limit(limit_opt): | |
249 | if limit_opt: |
|
249 | if limit_opt: | |
250 | try: |
|
250 | try: | |
251 | limit = int(limit_opt) |
|
251 | limit = int(limit_opt) | |
252 | except ValueError: |
|
252 | except ValueError: | |
253 | raise Abort(_("limit must be a positive integer")) |
|
253 | raise Abort(_("limit must be a positive integer")) | |
254 | if limit <= 0: |
|
254 | if limit <= 0: | |
255 | raise Abort(_("limit must be positive")) |
|
255 | raise Abort(_("limit must be positive")) | |
256 | else: |
|
256 | else: | |
257 | limit = sys.maxint |
|
257 | limit = sys.maxint | |
258 | return limit |
|
258 | return limit | |
259 |
|
259 | |||
260 | def get_revs(repo, rev_opt): |
|
260 | def get_revs(repo, rev_opt): | |
261 | if rev_opt: |
|
261 | if rev_opt: | |
262 | revs = revrange(repo, rev_opt) |
|
262 | revs = revrange(repo, rev_opt) | |
263 | return (max(revs), min(revs)) |
|
263 | return (max(revs), min(revs)) | |
264 | else: |
|
264 | else: | |
265 | return (len(repo) - 1, 0) |
|
265 | return (len(repo) - 1, 0) | |
266 |
|
266 | |||
267 | def check_unsupported_flags(opts): |
|
267 | def check_unsupported_flags(opts): | |
268 | for op in ["follow", "follow_first", "date", "copies", "keyword", "remove", |
|
268 | for op in ["follow", "follow_first", "date", "copies", "keyword", "remove", | |
269 | "only_merges", "user", "only_branch", "prune", "newest_first", |
|
269 | "only_merges", "user", "only_branch", "prune", "newest_first", | |
270 | "no_merges", "include", "exclude"]: |
|
270 | "no_merges", "include", "exclude"]: | |
271 | if op in opts and opts[op]: |
|
271 | if op in opts and opts[op]: | |
272 | raise Abort(_("--graph option is incompatible with --%s") % op) |
|
272 | raise Abort(_("--graph option is incompatible with --%s") % op) | |
273 |
|
273 | |||
274 |
|
274 | |||
275 | def graphlog(ui, repo, path=None, **opts): |
|
275 | def graphlog(ui, repo, path=None, **opts): | |
276 | """show revision history alongside an ASCII revision graph |
|
276 | """show revision history alongside an ASCII revision graph | |
277 |
|
277 | |||
278 | Print a revision history alongside a revision graph drawn with |
|
278 | Print a revision history alongside a revision graph drawn with | |
279 | ASCII characters. |
|
279 | ASCII characters. | |
280 |
|
280 | |||
281 | Nodes printed as an @ character are parents of the working |
|
281 | Nodes printed as an @ character are parents of the working | |
282 | directory. |
|
282 | directory. | |
283 | """ |
|
283 | """ | |
284 |
|
284 | |||
285 | check_unsupported_flags(opts) |
|
285 | check_unsupported_flags(opts) | |
286 | limit = get_limit(opts["limit"]) |
|
286 | limit = get_limit(opts["limit"]) | |
287 | start, stop = get_revs(repo, opts["rev"]) |
|
287 | start, stop = get_revs(repo, opts["rev"]) | |
288 | stop = max(stop, start - limit + 1) |
|
288 | stop = max(stop, start - limit + 1) | |
289 | if start == nullrev: |
|
289 | if start == nullrev: | |
290 | return |
|
290 | return | |
291 |
|
291 | |||
292 | if path: |
|
292 | if path: | |
293 | path = canonpath(repo.root, os.getcwd(), path) |
|
293 | path = canonpath(repo.root, os.getcwd(), path) | |
294 | if path: # could be reset in canonpath |
|
294 | if path: # could be reset in canonpath | |
295 | revdag = filerevs(repo, path, start, stop) |
|
295 | revdag = filerevs(repo, path, start, stop) | |
296 | else: |
|
296 | else: | |
297 | revdag = revisions(repo, start, stop) |
|
297 | revdag = revisions(repo, start, stop) | |
298 |
|
298 | |||
299 | repo_parents = repo.dirstate.parents() |
|
299 | repo_parents = repo.dirstate.parents() | |
300 | displayer = show_changeset(ui, repo, opts, buffered=True) |
|
300 | displayer = show_changeset(ui, repo, opts, buffered=True) | |
301 | def graphabledag(): |
|
301 | def graphabledag(): | |
302 | for (ctx, parents) in revdag: |
|
302 | for (ctx, parents) in revdag: | |
303 | # log_strings is the list of all log strings to draw alongside |
|
303 | # log_strings is the list of all log strings to draw alongside | |
304 | # the graph. |
|
304 | # the graph. | |
305 | displayer.show(ctx) |
|
305 | displayer.show(ctx) | |
306 | lines = displayer.hunk.pop(ctx.rev()).split("\n")[:-1] |
|
306 | lines = displayer.hunk.pop(ctx.rev()).split("\n")[:-1] | |
307 | char = ctx.node() in repo_parents and '@' or 'o' |
|
307 | char = ctx.node() in repo_parents and '@' or 'o' | |
308 | yield (ctx.rev(), parents, char, lines) |
|
308 | yield (ctx.rev(), parents, char, lines) | |
309 |
|
309 | |||
310 | ascii(ui, grapher(graphabledag())) |
|
310 | ascii(ui, grapher(graphabledag())) | |
311 |
|
311 | |||
312 | def outgoing_revs(ui, repo, dest, opts): |
|
312 | def outgoing_revs(ui, repo, dest, opts): | |
313 | """cset DAG generator yielding (node, [parents]) tuples |
|
313 | """cset DAG generator yielding (node, [parents]) tuples | |
314 |
|
314 | |||
315 | This generator function walks through the revisions not found |
|
315 | This generator function walks through the revisions not found | |
316 | in the destination |
|
316 | in the destination | |
317 | """ |
|
317 | """ | |
318 | limit = cmdutil.loglimit(opts) |
|
318 | limit = cmdutil.loglimit(opts) | |
319 | dest, revs, checkout = hg.parseurl( |
|
319 | dest, revs, checkout = hg.parseurl( | |
320 | ui.expandpath(dest or 'default-push', dest or 'default'), |
|
320 | ui.expandpath(dest or 'default-push', dest or 'default'), | |
321 | opts.get('rev')) |
|
321 | opts.get('rev')) | |
322 | cmdutil.setremoteconfig(ui, opts) |
|
322 | cmdutil.setremoteconfig(ui, opts) | |
323 | if revs: |
|
323 | if revs: | |
324 | revs = [repo.lookup(rev) for rev in revs] |
|
324 | revs = [repo.lookup(rev) for rev in revs] | |
325 | other = hg.repository(ui, dest) |
|
325 | other = hg.repository(ui, dest) | |
326 | ui.status(_('comparing with %s\n') % url.hidepassword(dest)) |
|
326 | ui.status(_('comparing with %s\n') % url.hidepassword(dest)) | |
327 | o = repo.findoutgoing(other, force=opts.get('force')) |
|
327 | o = repo.findoutgoing(other, force=opts.get('force')) | |
328 | if not o: |
|
328 | if not o: | |
329 | ui.status(_("no changes found\n")) |
|
329 | ui.status(_("no changes found\n")) | |
330 | return |
|
330 | return | |
331 | o = repo.changelog.nodesbetween(o, revs)[0] |
|
331 | o = repo.changelog.nodesbetween(o, revs)[0] | |
332 | o.reverse() |
|
332 | o.reverse() | |
333 | revdict = {} |
|
333 | revdict = {} | |
334 | for n in o: |
|
334 | for n in o: | |
335 | revdict[repo.changectx(n).rev()]=True |
|
335 | revdict[repo.changectx(n).rev()]=True | |
336 | count = 0 |
|
336 | count = 0 | |
337 | for n in o: |
|
337 | for n in o: | |
338 | if count >= limit: |
|
338 | if count >= limit: | |
339 | break |
|
339 | break | |
340 | ctx = repo.changectx(n) |
|
340 | ctx = repo.changectx(n) | |
341 | parents = [p.rev() for p in ctx.parents() if p.rev() in revdict] |
|
341 | parents = [p.rev() for p in ctx.parents() if p.rev() in revdict] | |
342 | parents.sort() |
|
342 | parents.sort() | |
343 | yield (ctx, parents) |
|
343 | yield (ctx, parents) | |
344 | count += 1 |
|
344 | count += 1 | |
345 |
|
345 | |||
346 | def goutgoing(ui, repo, dest=None, **opts): |
|
346 | def goutgoing(ui, repo, dest=None, **opts): | |
347 | """show the outgoing changesets alongside an ASCII revision graph |
|
347 | """show the outgoing changesets alongside an ASCII revision graph | |
348 |
|
348 | |||
349 | Print the outgoing changesets alongside a revision graph drawn with |
|
349 | Print the outgoing changesets alongside a revision graph drawn with | |
350 | ASCII characters. |
|
350 | ASCII characters. | |
351 |
|
351 | |||
352 | Nodes printed as an @ character are parents of the working |
|
352 | Nodes printed as an @ character are parents of the working | |
353 | directory. |
|
353 | directory. | |
354 | """ |
|
354 | """ | |
355 | check_unsupported_flags(opts) |
|
355 | check_unsupported_flags(opts) | |
356 | revdag = outgoing_revs(ui, repo, dest, opts) |
|
356 | revdag = outgoing_revs(ui, repo, dest, opts) | |
357 | repo_parents = repo.dirstate.parents() |
|
357 | repo_parents = repo.dirstate.parents() | |
358 | displayer = show_changeset(ui, repo, opts, buffered=True) |
|
358 | displayer = show_changeset(ui, repo, opts, buffered=True) | |
359 | def graphabledag(): |
|
359 | def graphabledag(): | |
360 | for (ctx, parents) in revdag: |
|
360 | for (ctx, parents) in revdag: | |
361 | # log_strings is the list of all log strings to draw alongside |
|
361 | # log_strings is the list of all log strings to draw alongside | |
362 | # the graph. |
|
362 | # the graph. | |
363 | displayer.show(ctx) |
|
363 | displayer.show(ctx) | |
364 | lines = displayer.hunk.pop(ctx.rev()).split("\n")[:-1] |
|
364 | lines = displayer.hunk.pop(ctx.rev()).split("\n")[:-1] | |
365 | char = ctx.node() in repo_parents and '@' or 'o' |
|
365 | char = ctx.node() in repo_parents and '@' or 'o' | |
366 | yield (ctx.rev(), parents, char, lines) |
|
366 | yield (ctx.rev(), parents, char, lines) | |
367 |
|
367 | |||
368 | ascii(ui, grapher(graphabledag())) |
|
368 | ascii(ui, grapher(graphabledag())) | |
369 |
|
369 | |||
370 | def incoming_revs(other, chlist, opts): |
|
370 | def incoming_revs(other, chlist, opts): | |
371 | """cset DAG generator yielding (node, [parents]) tuples |
|
371 | """cset DAG generator yielding (node, [parents]) tuples | |
372 |
|
372 | |||
373 | This generator function walks through the revisions of the source |

373 | This generator function walks through the revisions of the source | |
374 | not found in repo |
|
374 | not found in repo | |
375 | """ |
|
375 | """ | |
376 | limit = cmdutil.loglimit(opts) |
|
376 | limit = cmdutil.loglimit(opts) | |
377 | chlist.reverse() |
|
377 | chlist.reverse() | |
378 | revdict = {} |
|
378 | revdict = {} | |
379 | for n in chlist: |
|
379 | for n in chlist: | |
380 | revdict[other.changectx(n).rev()]=True |
|
380 | revdict[other.changectx(n).rev()]=True | |
381 | count = 0 |
|
381 | count = 0 | |
382 | for n in chlist: |
|
382 | for n in chlist: | |
383 | if count >= limit: |
|
383 | if count >= limit: | |
384 | break |
|
384 | break | |
385 | ctx = other.changectx(n) |
|
385 | ctx = other.changectx(n) | |
386 | parents = [p.rev() for p in ctx.parents() if p.rev() in revdict] |
|
386 | parents = [p.rev() for p in ctx.parents() if p.rev() in revdict] | |
387 | parents.sort() |
|
387 | parents.sort() | |
388 | yield (ctx, parents) |
|
388 | yield (ctx, parents) | |
389 | count += 1 |
|
389 | count += 1 | |
390 |
|
390 | |||
391 | def gincoming(ui, repo, source="default", **opts): |
|
391 | def gincoming(ui, repo, source="default", **opts): | |
392 | """show the incoming changesets alongside an ASCII revision graph |
|
392 | """show the incoming changesets alongside an ASCII revision graph | |
393 |
|
393 | |||
394 | Print the incoming changesets alongside a revision graph drawn with |
|
394 | Print the incoming changesets alongside a revision graph drawn with | |
395 | ASCII characters. |
|
395 | ASCII characters. | |
396 |
|
396 | |||
397 | Nodes printed as an @ character are parents of the working |
|
397 | Nodes printed as an @ character are parents of the working | |
398 | directory. |
|
398 | directory. | |
399 | """ |
|
399 | """ | |
400 |
|
400 | |||
401 | check_unsupported_flags(opts) |
|
401 | check_unsupported_flags(opts) | |
402 | source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev')) |
|
402 | source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev')) | |
403 | cmdutil.setremoteconfig(ui, opts) |
|
403 | cmdutil.setremoteconfig(ui, opts) | |
404 |
|
404 | |||
405 | other = hg.repository(ui, source) |
|
405 | other = hg.repository(ui, source) | |
406 | ui.status(_('comparing with %s\n') % url.hidepassword(source)) |
|
406 | ui.status(_('comparing with %s\n') % url.hidepassword(source)) | |
407 | if revs: |
|
407 | if revs: | |
408 | revs = [other.lookup(rev) for rev in revs] |
|
408 | revs = [other.lookup(rev) for rev in revs] | |
409 | incoming = repo.findincoming(other, heads=revs, force=opts["force"]) |
|
409 | incoming = repo.findincoming(other, heads=revs, force=opts["force"]) | |
410 | if not incoming: |
|
410 | if not incoming: | |
411 | try: |
|
411 | try: | |
412 | os.unlink(opts["bundle"]) |
|
412 | os.unlink(opts["bundle"]) | |
413 | except: |
|
413 | except: | |
414 | pass |
|
414 | pass | |
415 | ui.status(_("no changes found\n")) |
|
415 | ui.status(_("no changes found\n")) | |
416 | return |
|
416 | return | |
417 |
|
417 | |||
418 | cleanup = None |
|
418 | cleanup = None | |
419 | try: |
|
419 | try: | |
420 | fname = opts["bundle"] |
|
420 | fname = opts["bundle"] | |
421 | if fname or not other.local(): |
|
421 | if fname or not other.local(): | |
422 | # create a bundle (uncompressed if other repo is not local) |
|
422 | # create a bundle (uncompressed if other repo is not local) | |
423 | if revs is None: |
|
423 | if revs is None: | |
424 | cg = other.changegroup(incoming, "incoming") |
|
424 | cg = other.changegroup(incoming, "incoming") | |
425 | else: |
|
425 | else: | |
426 | cg = other.changegroupsubset(incoming, revs, 'incoming') |
|
426 | cg = other.changegroupsubset(incoming, revs, 'incoming') | |
427 | bundletype = other.local() and "HG10BZ" or "HG10UN" |
|
427 | bundletype = other.local() and "HG10BZ" or "HG10UN" | |
428 | fname = cleanup = changegroup.writebundle(cg, fname, bundletype) |
|
428 | fname = cleanup = changegroup.writebundle(cg, fname, bundletype) | |
429 | # keep written bundle? |
|
429 | # keep written bundle? | |
430 | if opts["bundle"]: |
|
430 | if opts["bundle"]: | |
431 | cleanup = None |
|
431 | cleanup = None | |
432 | if not other.local(): |
|
432 | if not other.local(): | |
433 | # use the created uncompressed bundlerepo |
|
433 | # use the created uncompressed bundlerepo | |
434 | other = bundlerepo.bundlerepository(ui, repo.root, fname) |
|
434 | other = bundlerepo.bundlerepository(ui, repo.root, fname) | |
435 |
|
435 | |||
436 | chlist = other.changelog.nodesbetween(incoming, revs)[0] |
|
436 | chlist = other.changelog.nodesbetween(incoming, revs)[0] | |
437 | revdag = incoming_revs(other, chlist, opts) |
|
437 | revdag = incoming_revs(other, chlist, opts) | |
438 |
other_parents = |
|
438 | other_parents = [] | |
439 | displayer = show_changeset(ui, other, opts, buffered=True) |
|
439 | displayer = show_changeset(ui, other, opts, buffered=True) | |
440 | def graphabledag(): |
|
440 | def graphabledag(): | |
441 | for (ctx, parents) in revdag: |
|
441 | for (ctx, parents) in revdag: | |
442 | # log_strings is the list of all log strings to draw alongside |
|
442 | # log_strings is the list of all log strings to draw alongside | |
443 | # the graph. |
|
443 | # the graph. | |
444 | displayer.show(ctx) |
|
444 | displayer.show(ctx) | |
445 | lines = displayer.hunk.pop(ctx.rev()).split("\n")[:-1] |
|
445 | lines = displayer.hunk.pop(ctx.rev()).split("\n")[:-1] | |
446 | char = ctx.node() in other_parents and '@' or 'o' |
|
446 | char = ctx.node() in other_parents and '@' or 'o' | |
447 | yield (ctx.rev(), parents, char, lines) |
|
447 | yield (ctx.rev(), parents, char, lines) | |
448 |
|
448 | |||
449 | ascii(ui, grapher(graphabledag())) |
|
449 | ascii(ui, grapher(graphabledag())) | |
450 | finally: |
|
450 | finally: | |
451 | if hasattr(other, 'close'): |
|
451 | if hasattr(other, 'close'): | |
452 | other.close() |
|
452 | other.close() | |
453 | if cleanup: |
|
453 | if cleanup: | |
454 | os.unlink(cleanup) |
|
454 | os.unlink(cleanup) | |
455 |
|
455 | |||
456 | def uisetup(ui): |
|
456 | def uisetup(ui): | |
457 | '''Initialize the extension.''' |
|
457 | '''Initialize the extension.''' | |
458 | _wrapcmd(ui, 'log', commands.table, graphlog) |
|
458 | _wrapcmd(ui, 'log', commands.table, graphlog) | |
459 | _wrapcmd(ui, 'incoming', commands.table, gincoming) |
|
459 | _wrapcmd(ui, 'incoming', commands.table, gincoming) | |
460 | _wrapcmd(ui, 'outgoing', commands.table, goutgoing) |
|
460 | _wrapcmd(ui, 'outgoing', commands.table, goutgoing) | |
461 |
|
461 | |||
462 | def _wrapcmd(ui, cmd, table, wrapfn): |
|
462 | def _wrapcmd(ui, cmd, table, wrapfn): | |
463 | '''wrap the command''' |
|
463 | '''wrap the command''' | |
464 | def graph(orig, *args, **kwargs): |
|
464 | def graph(orig, *args, **kwargs): | |
465 | if kwargs['graph']: |
|
465 | if kwargs['graph']: | |
466 | return wrapfn(*args, **kwargs) |
|
466 | return wrapfn(*args, **kwargs) | |
467 | return orig(*args, **kwargs) |
|
467 | return orig(*args, **kwargs) | |
468 | entry = extensions.wrapcommand(table, cmd, graph) |
|
468 | entry = extensions.wrapcommand(table, cmd, graph) | |
469 | entry[1].append(('g', 'graph', None, _("show the revision DAG"))) |
|
469 | entry[1].append(('g', 'graph', None, _("show the revision DAG"))) | |
470 |
|
470 | |||
471 | cmdtable = { |
|
471 | cmdtable = { | |
472 | "glog": |
|
472 | "glog": | |
473 | (graphlog, |
|
473 | (graphlog, | |
474 | [('l', 'limit', '', _('limit number of changes displayed')), |
|
474 | [('l', 'limit', '', _('limit number of changes displayed')), | |
475 | ('p', 'patch', False, _('show patch')), |
|
475 | ('p', 'patch', False, _('show patch')), | |
476 | ('r', 'rev', [], _('show the specified revision or range')), |
|
476 | ('r', 'rev', [], _('show the specified revision or range')), | |
477 | ] + templateopts, |
|
477 | ] + templateopts, | |
478 | _('hg glog [OPTION]... [FILE]')), |
|
478 | _('hg glog [OPTION]... [FILE]')), | |
479 | } |
|
479 | } |
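
Reviewer note on the graphlog code above: the core of the extension is the grapher()/ascii() pair, where grapher() assigns each changeset a column, tracks the ongoing edges in `seen`, and yields (char, lines, column, edges, ncols, width-change) tuples that ascii() then renders. The standalone sketch below is illustration only and is not part of the patch; the function name tiny_grapher, the sample DAG and the print format are invented, but the column bookkeeping mirrors the grapher() shown above.

# Standalone sketch (illustration only, not part of the patch): a stripped-down
# version of grapher()'s column bookkeeping on a made-up four-revision DAG.
def tiny_grapher(nodes):
    """nodes yields (rev, parents, char, lines), like graphabledag() does."""
    seen = []
    for rev, parents, char, lines in nodes:
        if rev not in seen:
            seen.append(rev)
        idx = seen.index(rev)
        known = [p for p in parents if p in seen]
        new = [p for p in parents if p not in seen]
        ncols = len(seen)
        nextseen = seen[:]
        nextseen[idx:idx + 1] = new              # node's slot becomes its unseen parents
        edges = [(idx, nextseen.index(p)) for p in known]
        if new:                                  # edge straight down to the first new parent
            edges.append((idx, idx))
        if len(new) > 1:                         # a second parent opens a new column
            edges.append((idx, idx + 1))
        seen = nextseen
        yield char, lines, idx, edges, ncols, len(nextseen) - ncols

# Made-up history: revision 3 merges 1 and 2, which are both children of 0.
dag = [(3, [1, 2], 'o', ['rev 3 (merge)']),
       (2, [0], 'o', ['rev 2']),
       (1, [0], 'o', ['rev 1']),
       (0, [], 'o', ['rev 0'])]

for char, lines, idx, edges, ncols, change in tiny_grapher(dag):
    print("%s %-14s col=%d edges=%r width%+d" % (char, lines[0], idx, edges, change))

Running this on the sample prints one tuple per changeset: the merge at revision 3 opens a second column (width change +1) and the columns close again as the walk reaches revisions 1 and 0.
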
@@ -1,294 +1,298 b'' | |||||
1 | """ |
|
1 | """ | |
2 | bundlerepo.py - repository class for viewing uncompressed bundles |
|
2 | bundlerepo.py - repository class for viewing uncompressed bundles | |
3 |
|
3 | |||
4 | This provides a read-only repository interface to bundles as if |
|
4 | This provides a read-only repository interface to bundles as if | |
5 | they were part of the actual repository. |
|
5 | they were part of the actual repository. | |
6 |
|
6 | |||
7 | Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com> |
|
7 | Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com> | |
8 |
|
8 | |||
9 | This software may be used and distributed according to the terms |
|
9 | This software may be used and distributed according to the terms | |
10 | of the GNU General Public License, incorporated herein by reference. |
|
10 | of the GNU General Public License, incorporated herein by reference. | |
11 | """ |
|
11 | """ | |
12 |
|
12 | |||
13 | from node import hex, nullid, short |
|
13 | from node import hex, nullid, short | |
14 | from i18n import _ |
|
14 | from i18n import _ | |
15 | import changegroup, util, os, struct, bz2, zlib, tempfile, shutil, mdiff |
|
15 | import changegroup, util, os, struct, bz2, zlib, tempfile, shutil, mdiff | |
16 | import repo, localrepo, changelog, manifest, filelog, revlog |
|
16 | import repo, localrepo, changelog, manifest, filelog, revlog, context | |
17 |
|
17 | |||
18 | class bundlerevlog(revlog.revlog): |
|
18 | class bundlerevlog(revlog.revlog): | |
19 | def __init__(self, opener, indexfile, bundlefile, |
|
19 | def __init__(self, opener, indexfile, bundlefile, | |
20 | linkmapper=None): |
|
20 | linkmapper=None): | |
21 | # How it works: |
|
21 | # How it works: | |
22 | # to retrieve a revision, we need to know the offset of |
|
22 | # to retrieve a revision, we need to know the offset of | |
23 | # the revision in the bundlefile (an opened file). |
|
23 | # the revision in the bundlefile (an opened file). | |
24 | # |
|
24 | # | |
25 | # We store this offset in the index (start), to differentiate a |
|
25 | # We store this offset in the index (start), to differentiate a | |
26 | # rev in the bundle and from a rev in the revlog, we check |
|
26 | # rev in the bundle and from a rev in the revlog, we check | |
27 | # len(index[r]). If the tuple is bigger than 7, it is a bundle |
|
27 | # len(index[r]). If the tuple is bigger than 7, it is a bundle | |
28 | # (it is bigger since we store the node to which the delta is) |
|
28 | # (it is bigger since we store the node to which the delta is) | |
29 | # |
|
29 | # | |
30 | revlog.revlog.__init__(self, opener, indexfile) |
|
30 | revlog.revlog.__init__(self, opener, indexfile) | |
31 | self.bundlefile = bundlefile |
|
31 | self.bundlefile = bundlefile | |
32 | self.basemap = {} |
|
32 | self.basemap = {} | |
33 | def chunkpositer(): |
|
33 | def chunkpositer(): | |
34 | for chunk in changegroup.chunkiter(bundlefile): |
|
34 | for chunk in changegroup.chunkiter(bundlefile): | |
35 | pos = bundlefile.tell() |
|
35 | pos = bundlefile.tell() | |
36 | yield chunk, pos - len(chunk) |
|
36 | yield chunk, pos - len(chunk) | |
37 | n = len(self) |
|
37 | n = len(self) | |
38 | prev = None |
|
38 | prev = None | |
39 | for chunk, start in chunkpositer(): |
|
39 | for chunk, start in chunkpositer(): | |
40 | size = len(chunk) |
|
40 | size = len(chunk) | |
41 | if size < 80: |
|
41 | if size < 80: | |
42 | raise util.Abort(_("invalid changegroup")) |
|
42 | raise util.Abort(_("invalid changegroup")) | |
43 | start += 80 |
|
43 | start += 80 | |
44 | size -= 80 |
|
44 | size -= 80 | |
45 | node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80]) |
|
45 | node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80]) | |
46 | if node in self.nodemap: |
|
46 | if node in self.nodemap: | |
47 | prev = node |
|
47 | prev = node | |
48 | continue |
|
48 | continue | |
49 | for p in (p1, p2): |
|
49 | for p in (p1, p2): | |
50 | if not p in self.nodemap: |
|
50 | if not p in self.nodemap: | |
51 | raise revlog.LookupError(p1, self.indexfile, |
|
51 | raise revlog.LookupError(p1, self.indexfile, | |
52 | _("unknown parent")) |
|
52 | _("unknown parent")) | |
53 | if linkmapper is None: |
|
53 | if linkmapper is None: | |
54 | link = n |
|
54 | link = n | |
55 | else: |
|
55 | else: | |
56 | link = linkmapper(cs) |
|
56 | link = linkmapper(cs) | |
57 |
|
57 | |||
58 | if not prev: |
|
58 | if not prev: | |
59 | prev = p1 |
|
59 | prev = p1 | |
60 | # start, size, full unc. size, base (unused), link, p1, p2, node |
|
60 | # start, size, full unc. size, base (unused), link, p1, p2, node | |
61 | e = (revlog.offset_type(start, 0), size, -1, -1, link, |
|
61 | e = (revlog.offset_type(start, 0), size, -1, -1, link, | |
62 | self.rev(p1), self.rev(p2), node) |
|
62 | self.rev(p1), self.rev(p2), node) | |
63 | self.basemap[n] = prev |
|
63 | self.basemap[n] = prev | |
64 | self.index.insert(-1, e) |
|
64 | self.index.insert(-1, e) | |
65 | self.nodemap[node] = n |
|
65 | self.nodemap[node] = n | |
66 | prev = node |
|
66 | prev = node | |
67 | n += 1 |
|
67 | n += 1 | |
68 |
|
68 | |||
69 | def bundle(self, rev): |
|
69 | def bundle(self, rev): | |
70 | """is rev from the bundle""" |
|
70 | """is rev from the bundle""" | |
71 | if rev < 0: |
|
71 | if rev < 0: | |
72 | return False |
|
72 | return False | |
73 | return rev in self.basemap |
|
73 | return rev in self.basemap | |
74 | def bundlebase(self, rev): return self.basemap[rev] |
|
74 | def bundlebase(self, rev): return self.basemap[rev] | |
75 | def chunk(self, rev, df=None, cachelen=4096): |
|
75 | def chunk(self, rev, df=None, cachelen=4096): | |
76 | # Warning: in case of bundle, the diff is against bundlebase, |
|
76 | # Warning: in case of bundle, the diff is against bundlebase, | |
77 | # not against rev - 1 |
|
77 | # not against rev - 1 | |
78 | # XXX: could use some caching |
|
78 | # XXX: could use some caching | |
79 | if not self.bundle(rev): |
|
79 | if not self.bundle(rev): | |
80 | return revlog.revlog.chunk(self, rev, df) |
|
80 | return revlog.revlog.chunk(self, rev, df) | |
81 | self.bundlefile.seek(self.start(rev)) |
|
81 | self.bundlefile.seek(self.start(rev)) | |
82 | return self.bundlefile.read(self.length(rev)) |
|
82 | return self.bundlefile.read(self.length(rev)) | |
83 |
|
83 | |||
84 | def revdiff(self, rev1, rev2): |
|
84 | def revdiff(self, rev1, rev2): | |
85 | """return or calculate a delta between two revisions""" |
|
85 | """return or calculate a delta between two revisions""" | |
86 | if self.bundle(rev1) and self.bundle(rev2): |
|
86 | if self.bundle(rev1) and self.bundle(rev2): | |
87 | # hot path for bundle |
|
87 | # hot path for bundle | |
88 | revb = self.rev(self.bundlebase(rev2)) |
|
88 | revb = self.rev(self.bundlebase(rev2)) | |
89 | if revb == rev1: |
|
89 | if revb == rev1: | |
90 | return self.chunk(rev2) |
|
90 | return self.chunk(rev2) | |
91 | elif not self.bundle(rev1) and not self.bundle(rev2): |
|
91 | elif not self.bundle(rev1) and not self.bundle(rev2): | |
92 | return revlog.revlog.revdiff(self, rev1, rev2) |
|
92 | return revlog.revlog.revdiff(self, rev1, rev2) | |
93 |
|
93 | |||
94 | return mdiff.textdiff(self.revision(self.node(rev1)), |
|
94 | return mdiff.textdiff(self.revision(self.node(rev1)), | |
95 | self.revision(self.node(rev2))) |
|
95 | self.revision(self.node(rev2))) | |
96 |
|
96 | |||
97 | def revision(self, node): |
|
97 | def revision(self, node): | |
98 | """return an uncompressed revision of a given""" |
|
98 | """return an uncompressed revision of a given""" | |
99 | if node == nullid: return "" |
|
99 | if node == nullid: return "" | |
100 |
|
100 | |||
101 | text = None |
|
101 | text = None | |
102 | chain = [] |
|
102 | chain = [] | |
103 | iter_node = node |
|
103 | iter_node = node | |
104 | rev = self.rev(iter_node) |
|
104 | rev = self.rev(iter_node) | |
105 | # reconstruct the revision if it is from a changegroup |
|
105 | # reconstruct the revision if it is from a changegroup | |
106 | while self.bundle(rev): |
|
106 | while self.bundle(rev): | |
107 | if self._cache and self._cache[0] == iter_node: |
|
107 | if self._cache and self._cache[0] == iter_node: | |
108 | text = self._cache[2] |
|
108 | text = self._cache[2] | |
109 | break |
|
109 | break | |
110 | chain.append(rev) |
|
110 | chain.append(rev) | |
111 | iter_node = self.bundlebase(rev) |
|
111 | iter_node = self.bundlebase(rev) | |
112 | rev = self.rev(iter_node) |
|
112 | rev = self.rev(iter_node) | |
113 | if text is None: |
|
113 | if text is None: | |
114 | text = revlog.revlog.revision(self, iter_node) |
|
114 | text = revlog.revlog.revision(self, iter_node) | |
115 |
|
115 | |||
116 | while chain: |
|
116 | while chain: | |
117 | delta = self.chunk(chain.pop()) |
|
117 | delta = self.chunk(chain.pop()) | |
118 | text = mdiff.patches(text, [delta]) |
|
118 | text = mdiff.patches(text, [delta]) | |
119 |
|
119 | |||
120 | p1, p2 = self.parents(node) |
|
120 | p1, p2 = self.parents(node) | |
121 | if node != revlog.hash(text, p1, p2): |
|
121 | if node != revlog.hash(text, p1, p2): | |
122 | raise revlog.RevlogError(_("integrity check failed on %s:%d") |
|
122 | raise revlog.RevlogError(_("integrity check failed on %s:%d") | |
123 | % (self.datafile, self.rev(node))) |
|
123 | % (self.datafile, self.rev(node))) | |
124 |
|
124 | |||
125 | self._cache = (node, self.rev(node), text) |
|
125 | self._cache = (node, self.rev(node), text) | |
126 | return text |
|
126 | return text | |
127 |
|
127 | |||
128 | def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): |
|
128 | def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): | |
129 | raise NotImplementedError |
|
129 | raise NotImplementedError | |
130 | def addgroup(self, revs, linkmapper, transaction): |
|
130 | def addgroup(self, revs, linkmapper, transaction): | |
131 | raise NotImplementedError |
|
131 | raise NotImplementedError | |
132 | def strip(self, rev, minlink): |
|
132 | def strip(self, rev, minlink): | |
133 | raise NotImplementedError |
|
133 | raise NotImplementedError | |
134 | def checksize(self): |
|
134 | def checksize(self): | |
135 | raise NotImplementedError |
|
135 | raise NotImplementedError | |
136 |
|
136 | |||
137 | class bundlechangelog(bundlerevlog, changelog.changelog): |
|
137 | class bundlechangelog(bundlerevlog, changelog.changelog): | |
138 | def __init__(self, opener, bundlefile): |
|
138 | def __init__(self, opener, bundlefile): | |
139 | changelog.changelog.__init__(self, opener) |
|
139 | changelog.changelog.__init__(self, opener) | |
140 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile) |
|
140 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile) | |
141 |
|
141 | |||
142 | class bundlemanifest(bundlerevlog, manifest.manifest): |
|
142 | class bundlemanifest(bundlerevlog, manifest.manifest): | |
143 | def __init__(self, opener, bundlefile, linkmapper): |
|
143 | def __init__(self, opener, bundlefile, linkmapper): | |
144 | manifest.manifest.__init__(self, opener) |
|
144 | manifest.manifest.__init__(self, opener) | |
145 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile, |
|
145 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile, | |
146 | linkmapper) |
|
146 | linkmapper) | |
147 |
|
147 | |||
148 | class bundlefilelog(bundlerevlog, filelog.filelog): |
|
148 | class bundlefilelog(bundlerevlog, filelog.filelog): | |
149 | def __init__(self, opener, path, bundlefile, linkmapper): |
|
149 | def __init__(self, opener, path, bundlefile, linkmapper): | |
150 | filelog.filelog.__init__(self, opener, path) |
|
150 | filelog.filelog.__init__(self, opener, path) | |
151 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile, |
|
151 | bundlerevlog.__init__(self, opener, self.indexfile, bundlefile, | |
152 | linkmapper) |
|
152 | linkmapper) | |
153 |
|
153 | |||
154 | class bundlerepository(localrepo.localrepository): |
|
154 | class bundlerepository(localrepo.localrepository): | |
155 | def __init__(self, ui, path, bundlename): |
|
155 | def __init__(self, ui, path, bundlename): | |
156 | self._tempparent = None |
|
156 | self._tempparent = None | |
157 | try: |
|
157 | try: | |
158 | localrepo.localrepository.__init__(self, ui, path) |
|
158 | localrepo.localrepository.__init__(self, ui, path) | |
159 | except repo.RepoError: |
|
159 | except repo.RepoError: | |
160 | self._tempparent = tempfile.mkdtemp() |
|
160 | self._tempparent = tempfile.mkdtemp() | |
161 | tmprepo = localrepo.instance(ui,self._tempparent,1) |
|
161 | tmprepo = localrepo.instance(ui,self._tempparent,1) | |
162 | localrepo.localrepository.__init__(self, ui, self._tempparent) |
|
162 | localrepo.localrepository.__init__(self, ui, self._tempparent) | |
163 |
|
163 | |||
164 | if path: |
|
164 | if path: | |
165 | self._url = 'bundle:' + path + '+' + bundlename |
|
165 | self._url = 'bundle:' + path + '+' + bundlename | |
166 | else: |
|
166 | else: | |
167 | self._url = 'bundle:' + bundlename |
|
167 | self._url = 'bundle:' + bundlename | |
168 |
|
168 | |||
169 | self.tempfile = None |
|
169 | self.tempfile = None | |
170 | self.bundlefile = open(bundlename, "rb") |
|
170 | self.bundlefile = open(bundlename, "rb") | |
171 | header = self.bundlefile.read(6) |
|
171 | header = self.bundlefile.read(6) | |
172 | if not header.startswith("HG"): |
|
172 | if not header.startswith("HG"): | |
173 | raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename) |
|
173 | raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename) | |
174 | elif not header.startswith("HG10"): |
|
174 | elif not header.startswith("HG10"): | |
175 | raise util.Abort(_("%s: unknown bundle version") % bundlename) |
|
175 | raise util.Abort(_("%s: unknown bundle version") % bundlename) | |
176 | elif (header == "HG10BZ") or (header == "HG10GZ"): |
|
176 | elif (header == "HG10BZ") or (header == "HG10GZ"): | |
177 | fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-", |
|
177 | fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-", | |
178 | suffix=".hg10un", dir=self.path) |
|
178 | suffix=".hg10un", dir=self.path) | |
179 | self.tempfile = temp |
|
179 | self.tempfile = temp | |
180 | fptemp = os.fdopen(fdtemp, 'wb') |
|
180 | fptemp = os.fdopen(fdtemp, 'wb') | |
181 | def generator(f): |
|
181 | def generator(f): | |
182 | if header == "HG10BZ": |
|
182 | if header == "HG10BZ": | |
183 | zd = bz2.BZ2Decompressor() |
|
183 | zd = bz2.BZ2Decompressor() | |
184 | zd.decompress("BZ") |
|
184 | zd.decompress("BZ") | |
185 | elif header == "HG10GZ": |
|
185 | elif header == "HG10GZ": | |
186 | zd = zlib.decompressobj() |
|
186 | zd = zlib.decompressobj() | |
187 | for chunk in f: |
|
187 | for chunk in f: | |
188 | yield zd.decompress(chunk) |
|
188 | yield zd.decompress(chunk) | |
189 | gen = generator(util.filechunkiter(self.bundlefile, 4096)) |
|
189 | gen = generator(util.filechunkiter(self.bundlefile, 4096)) | |
190 |
|
190 | |||
191 | try: |
|
191 | try: | |
192 | fptemp.write("HG10UN") |
|
192 | fptemp.write("HG10UN") | |
193 | for chunk in gen: |
|
193 | for chunk in gen: | |
194 | fptemp.write(chunk) |
|
194 | fptemp.write(chunk) | |
195 | finally: |
|
195 | finally: | |
196 | fptemp.close() |
|
196 | fptemp.close() | |
197 | self.bundlefile.close() |
|
197 | self.bundlefile.close() | |
198 |
|
198 | |||
199 | self.bundlefile = open(self.tempfile, "rb") |
|
199 | self.bundlefile = open(self.tempfile, "rb") | |
200 | # seek right after the header |
|
200 | # seek right after the header | |
201 | self.bundlefile.seek(6) |
|
201 | self.bundlefile.seek(6) | |
202 | elif header == "HG10UN": |
|
202 | elif header == "HG10UN": | |
203 | # nothing to do |
|
203 | # nothing to do | |
204 | pass |
|
204 | pass | |
205 | else: |
|
205 | else: | |
206 | raise util.Abort(_("%s: unknown bundle compression type") |
|
206 | raise util.Abort(_("%s: unknown bundle compression type") | |
207 | % bundlename) |
|
207 | % bundlename) | |
208 | # dict with the mapping 'filename' -> position in the bundle |
|
208 | # dict with the mapping 'filename' -> position in the bundle | |
209 | self.bundlefilespos = {} |
|
209 | self.bundlefilespos = {} | |
210 |
|
210 | |||
211 | def __getattr__(self, name): |
|
211 | def __getattr__(self, name): | |
212 | if name == 'changelog': |
|
212 | if name == 'changelog': | |
213 | self.changelog = bundlechangelog(self.sopener, self.bundlefile) |
|
213 | self.changelog = bundlechangelog(self.sopener, self.bundlefile) | |
214 | self.manstart = self.bundlefile.tell() |
|
214 | self.manstart = self.bundlefile.tell() | |
215 | return self.changelog |
|
215 | return self.changelog | |
216 | if name == 'manifest': |
|
216 | elif name == 'manifest': | |
217 | self.bundlefile.seek(self.manstart) |
|
217 | self.bundlefile.seek(self.manstart) | |
218 | self.manifest = bundlemanifest(self.sopener, self.bundlefile, |
|
218 | self.manifest = bundlemanifest(self.sopener, self.bundlefile, | |
219 | self.changelog.rev) |
|
219 | self.changelog.rev) | |
220 | self.filestart = self.bundlefile.tell() |
|
220 | self.filestart = self.bundlefile.tell() | |
221 | return self.manifest |
|
221 | return self.manifest | |
222 | if name == 'manstart': |
|
222 | elif name == 'manstart': | |
223 | self.changelog |
|
223 | self.changelog | |
224 | return self.manstart |
|
224 | return self.manstart | |
225 | if name == 'filestart': |
|
225 | elif name == 'filestart': | |
226 | self.manifest |
|
226 | self.manifest | |
227 | return self.filestart |
|
227 | return self.filestart | |
228 | return localrepo.localrepository.__getattr__(self, name) |
|
228 | else: | |
|
229 | raise AttributeError(name) | |||
229 |
|
230 | |||
230 | def url(self): |
|
231 | def url(self): | |
231 | return self._url |
|
232 | return self._url | |
232 |
|
233 | |||
233 | def file(self, f): |
|
234 | def file(self, f): | |
234 | if not self.bundlefilespos: |
|
235 | if not self.bundlefilespos: | |
235 | self.bundlefile.seek(self.filestart) |
|
236 | self.bundlefile.seek(self.filestart) | |
236 | while 1: |
|
237 | while 1: | |
237 | chunk = changegroup.getchunk(self.bundlefile) |
|
238 | chunk = changegroup.getchunk(self.bundlefile) | |
238 | if not chunk: |
|
239 | if not chunk: | |
239 | break |
|
240 | break | |
240 | self.bundlefilespos[chunk] = self.bundlefile.tell() |
|
241 | self.bundlefilespos[chunk] = self.bundlefile.tell() | |
241 | for c in changegroup.chunkiter(self.bundlefile): |
|
242 | for c in changegroup.chunkiter(self.bundlefile): | |
242 | pass |
|
243 | pass | |
243 |
|
244 | |||
244 | if f[0] == '/': |
|
245 | if f[0] == '/': | |
245 | f = f[1:] |
|
246 | f = f[1:] | |
246 | if f in self.bundlefilespos: |
|
247 | if f in self.bundlefilespos: | |
247 | self.bundlefile.seek(self.bundlefilespos[f]) |
|
248 | self.bundlefile.seek(self.bundlefilespos[f]) | |
248 | return bundlefilelog(self.sopener, f, self.bundlefile, |
|
249 | return bundlefilelog(self.sopener, f, self.bundlefile, | |
249 | self.changelog.rev) |
|
250 | self.changelog.rev) | |
250 | else: |
|
251 | else: | |
251 | return filelog.filelog(self.sopener, f) |
|
252 | return filelog.filelog(self.sopener, f) | |
252 |
|
253 | |||
253 | def close(self): |
|
254 | def close(self): | |
254 | """Close assigned bundle file immediately.""" |
|
255 | """Close assigned bundle file immediately.""" | |
255 | self.bundlefile.close() |
|
256 | self.bundlefile.close() | |
256 |
|
257 | |||
257 | def __del__(self): |
|
258 | def __del__(self): | |
258 | bundlefile = getattr(self, 'bundlefile', None) |
|
259 | bundlefile = getattr(self, 'bundlefile', None) | |
259 | if bundlefile and not bundlefile.closed: |
|
260 | if bundlefile and not bundlefile.closed: | |
260 | bundlefile.close() |
|
261 | bundlefile.close() | |
261 | tempfile = getattr(self, 'tempfile', None) |
|
262 | tempfile = getattr(self, 'tempfile', None) | |
262 | if tempfile is not None: |
|
263 | if tempfile is not None: | |
263 | os.unlink(tempfile) |
|
264 | os.unlink(tempfile) | |
264 | if self._tempparent: |
|
265 | if self._tempparent: | |
265 | shutil.rmtree(self._tempparent, True) |
|
266 | shutil.rmtree(self._tempparent, True) | |
266 |
|
267 | |||
267 | def cancopy(self): |
|
268 | def cancopy(self): | |
268 | return False |
|
269 | return False | |
269 |
|
270 | |||
|
271 | def getcwd(self): | |||
|
272 | return os.getcwd() # always outside the repo | |||
|
273 | ||||
270 | def instance(ui, path, create): |
|
274 | def instance(ui, path, create): | |
271 | if create: |
|
275 | if create: | |
272 | raise util.Abort(_('cannot create new bundle repository')) |
|
276 | raise util.Abort(_('cannot create new bundle repository')) | |
273 | parentpath = ui.config("bundle", "mainreporoot", "") |
|
277 | parentpath = ui.config("bundle", "mainreporoot", "") | |
274 | if parentpath: |
|
278 | if parentpath: | |
275 | # Try to make the full path relative so we get a nice, short URL. |
|
279 | # Try to make the full path relative so we get a nice, short URL. | |
276 | # In particular, we don't want temp dir names in test outputs. |
|
280 | # In particular, we don't want temp dir names in test outputs. | |
277 | cwd = os.getcwd() |
|
281 | cwd = os.getcwd() | |
278 | if parentpath == cwd: |
|
282 | if parentpath == cwd: | |
279 | parentpath = '' |
|
283 | parentpath = '' | |
280 | else: |
|
284 | else: | |
281 | cwd = os.path.join(cwd,'') |
|
285 | cwd = os.path.join(cwd,'') | |
282 | if parentpath.startswith(cwd): |
|
286 | if parentpath.startswith(cwd): | |
283 | parentpath = parentpath[len(cwd):] |
|
287 | parentpath = parentpath[len(cwd):] | |
284 | path = util.drop_scheme('file', path) |
|
288 | path = util.drop_scheme('file', path) | |
285 | if path.startswith('bundle:'): |
|
289 | if path.startswith('bundle:'): | |
286 | path = util.drop_scheme('bundle', path) |
|
290 | path = util.drop_scheme('bundle', path) | |
287 | s = path.split("+", 1) |
|
291 | s = path.split("+", 1) | |
288 | if len(s) == 1: |
|
292 | if len(s) == 1: | |
289 | repopath, bundlename = parentpath, s[0] |
|
293 | repopath, bundlename = parentpath, s[0] | |
290 | else: |
|
294 | else: | |
291 | repopath, bundlename = s |
|
295 | repopath, bundlename = s | |
292 | else: |
|
296 | else: | |
293 | repopath, bundlename = parentpath, path |
|
297 | repopath, bundlename = parentpath, path | |
294 | return bundlerepository(ui, repopath, bundlename) |
|
298 | return bundlerepository(ui, repopath, bundlename) |
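instance() above accepts either a bare bundle filename or a 'bundle:' URL, and the 'bundle:repopath+bundlename' form lets the URL carry the parent repository explicitly; otherwise the parent comes from the bundle.mainreporoot config. A small sketch of just that path-splitting rule (it ignores the file: scheme handling and is a reading of the code above, not a documented URL scheme):

    def splitbundleurl(path, parentpath=''):
        """Return (repopath, bundlename) with the same precedence as
        instance(): an explicit 'bundle:repo+file' wins, while a plain
        'bundle:file' or bare filename falls back to parentpath."""
        if path.startswith('bundle:'):
            path = path[len('bundle:'):]
            parts = path.split('+', 1)
            if len(parts) == 1:
                return parentpath, parts[0]
            return parts[0], parts[1]
        return parentpath, path

    assert splitbundleurl('bundle:/repo+changes.hg') == ('/repo', 'changes.hg')
    assert splitbundleurl('bundle:changes.hg', '/main') == ('/main', 'changes.hg')
    assert splitbundleurl('changes.hg', '/main') == ('/main', 'changes.hg')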
@@ -1,2150 +1,2150 b'' | |||||
1 | # localrepo.py - read/write repository class for mercurial |
|
1 | # localrepo.py - read/write repository class for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | from node import bin, hex, nullid, nullrev, short |
|
8 | from node import bin, hex, nullid, nullrev, short | |
9 | from i18n import _ |
|
9 | from i18n import _ | |
10 | import repo, changegroup |
|
10 | import repo, changegroup | |
11 | import changelog, dirstate, filelog, manifest, context, weakref |
|
11 | import changelog, dirstate, filelog, manifest, context, weakref | |
12 | import lock, transaction, stat, errno, ui, store |
|
12 | import lock, transaction, stat, errno, ui, store | |
13 | import os, revlog, time, util, extensions, hook, inspect |
|
13 | import os, revlog, time, util, extensions, hook, inspect | |
14 | import match as match_ |
|
14 | import match as match_ | |
15 | import merge as merge_ |
|
15 | import merge as merge_ | |
16 |
|
16 | |||
17 | class localrepository(repo.repository): |
|
17 | class localrepository(repo.repository): | |
18 | capabilities = util.set(('lookup', 'changegroupsubset')) |
|
18 | capabilities = util.set(('lookup', 'changegroupsubset')) | |
19 | supported = ('revlogv1', 'store', 'fncache') |
|
19 | supported = ('revlogv1', 'store', 'fncache') | |
20 |
|
20 | |||
21 | def __init__(self, parentui, path=None, create=0): |
|
21 | def __init__(self, parentui, path=None, create=0): | |
22 | repo.repository.__init__(self) |
|
22 | repo.repository.__init__(self) | |
23 | self.root = os.path.realpath(path) |
|
23 | self.root = os.path.realpath(path) | |
24 | self.path = os.path.join(self.root, ".hg") |
|
24 | self.path = os.path.join(self.root, ".hg") | |
25 | self.origroot = path |
|
25 | self.origroot = path | |
26 | self.opener = util.opener(self.path) |
|
26 | self.opener = util.opener(self.path) | |
27 | self.wopener = util.opener(self.root) |
|
27 | self.wopener = util.opener(self.root) | |
28 |
|
28 | |||
29 | if not os.path.isdir(self.path): |
|
29 | if not os.path.isdir(self.path): | |
30 | if create: |
|
30 | if create: | |
31 | if not os.path.exists(path): |
|
31 | if not os.path.exists(path): | |
32 | os.mkdir(path) |
|
32 | os.mkdir(path) | |
33 | os.mkdir(self.path) |
|
33 | os.mkdir(self.path) | |
34 | requirements = ["revlogv1"] |
|
34 | requirements = ["revlogv1"] | |
35 | if parentui.configbool('format', 'usestore', True): |
|
35 | if parentui.configbool('format', 'usestore', True): | |
36 | os.mkdir(os.path.join(self.path, "store")) |
|
36 | os.mkdir(os.path.join(self.path, "store")) | |
37 | requirements.append("store") |
|
37 | requirements.append("store") | |
38 | if parentui.configbool('format', 'usefncache', True): |
|
38 | if parentui.configbool('format', 'usefncache', True): | |
39 | requirements.append("fncache") |
|
39 | requirements.append("fncache") | |
40 | # create an invalid changelog |
|
40 | # create an invalid changelog | |
41 | self.opener("00changelog.i", "a").write( |
|
41 | self.opener("00changelog.i", "a").write( | |
42 | '\0\0\0\2' # represents revlogv2 |
|
42 | '\0\0\0\2' # represents revlogv2 | |
43 | ' dummy changelog to prevent using the old repo layout' |
|
43 | ' dummy changelog to prevent using the old repo layout' | |
44 | ) |
|
44 | ) | |
45 | reqfile = self.opener("requires", "w") |
|
45 | reqfile = self.opener("requires", "w") | |
46 | for r in requirements: |
|
46 | for r in requirements: | |
47 | reqfile.write("%s\n" % r) |
|
47 | reqfile.write("%s\n" % r) | |
48 | reqfile.close() |
|
48 | reqfile.close() | |
49 | else: |
|
49 | else: | |
50 | raise repo.RepoError(_("repository %s not found") % path) |
|
50 | raise repo.RepoError(_("repository %s not found") % path) | |
51 | elif create: |
|
51 | elif create: | |
52 | raise repo.RepoError(_("repository %s already exists") % path) |
|
52 | raise repo.RepoError(_("repository %s already exists") % path) | |
53 | else: |
|
53 | else: | |
54 | # find requirements |
|
54 | # find requirements | |
55 | requirements = [] |
|
55 | requirements = [] | |
56 | try: |
|
56 | try: | |
57 | requirements = self.opener("requires").read().splitlines() |
|
57 | requirements = self.opener("requires").read().splitlines() | |
58 | for r in requirements: |
|
58 | for r in requirements: | |
59 | if r not in self.supported: |
|
59 | if r not in self.supported: | |
60 | raise repo.RepoError(_("requirement '%s' not supported") % r) |
|
60 | raise repo.RepoError(_("requirement '%s' not supported") % r) | |
61 | except IOError, inst: |
|
61 | except IOError, inst: | |
62 | if inst.errno != errno.ENOENT: |
|
62 | if inst.errno != errno.ENOENT: | |
63 | raise |
|
63 | raise | |
64 |
|
64 | |||
65 | self.store = store.store(requirements, self.path, util.opener) |
|
65 | self.store = store.store(requirements, self.path, util.opener) | |
66 | self.spath = self.store.path |
|
66 | self.spath = self.store.path | |
67 | self.sopener = self.store.opener |
|
67 | self.sopener = self.store.opener | |
68 | self.sjoin = self.store.join |
|
68 | self.sjoin = self.store.join | |
69 | self.opener.createmode = self.store.createmode |
|
69 | self.opener.createmode = self.store.createmode | |
70 |
|
70 | |||
71 | self.ui = ui.ui(parentui=parentui) |
|
71 | self.ui = ui.ui(parentui=parentui) | |
72 | try: |
|
72 | try: | |
73 | self.ui.readconfig(self.join("hgrc"), self.root) |
|
73 | self.ui.readconfig(self.join("hgrc"), self.root) | |
74 | extensions.loadall(self.ui) |
|
74 | extensions.loadall(self.ui) | |
75 | except IOError: |
|
75 | except IOError: | |
76 | pass |
|
76 | pass | |
77 |
|
77 | |||
78 | self.tagscache = None |
|
78 | self.tagscache = None | |
79 | self._tagstypecache = None |
|
79 | self._tagstypecache = None | |
80 | self.branchcache = None |
|
80 | self.branchcache = None | |
81 | self._ubranchcache = None # UTF-8 version of branchcache |
|
81 | self._ubranchcache = None # UTF-8 version of branchcache | |
82 | self._branchcachetip = None |
|
82 | self._branchcachetip = None | |
83 | self.nodetagscache = None |
|
83 | self.nodetagscache = None | |
84 | self.filterpats = {} |
|
84 | self.filterpats = {} | |
85 | self._datafilters = {} |
|
85 | self._datafilters = {} | |
86 | self._transref = self._lockref = self._wlockref = None |
|
86 | self._transref = self._lockref = self._wlockref = None | |
87 |
|
87 | |||
88 | def __getattr__(self, name): |
|
88 | def __getattr__(self, name): | |
89 | if name == 'changelog': |
|
89 | if name == 'changelog': | |
90 | self.changelog = changelog.changelog(self.sopener) |
|
90 | self.changelog = changelog.changelog(self.sopener) | |
91 | self.sopener.defversion = self.changelog.version |
|
91 | self.sopener.defversion = self.changelog.version | |
92 | return self.changelog |
|
92 | return self.changelog | |
93 | if name == 'manifest': |
|
93 | if name == 'manifest': | |
94 | self.changelog |
|
94 | self.changelog | |
95 | self.manifest = manifest.manifest(self.sopener) |
|
95 | self.manifest = manifest.manifest(self.sopener) | |
96 | return self.manifest |
|
96 | return self.manifest | |
97 | if name == 'dirstate': |
|
97 | if name == 'dirstate': | |
98 | self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root) |
|
98 | self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root) | |
99 | return self.dirstate |
|
99 | return self.dirstate | |
100 | else: |
|
100 | else: | |
101 | raise AttributeError(name) |
|
101 | raise AttributeError(name) | |
102 |
|
102 | |||
103 | def __getitem__(self, changeid): |
|
103 | def __getitem__(self, changeid): | |
104 | if changeid == None: |
|
104 | if changeid == None: | |
105 | return context.workingctx(self) |
|
105 | return context.workingctx(self) | |
106 | return context.changectx(self, changeid) |
|
106 | return context.changectx(self, changeid) | |
107 |
|
107 | |||
108 | def __nonzero__(self): |
|
108 | def __nonzero__(self): | |
109 | return True |
|
109 | return True | |
110 |
|
110 | |||
111 | def __len__(self): |
|
111 | def __len__(self): | |
112 | return len(self.changelog) |
|
112 | return len(self.changelog) | |
113 |
|
113 | |||
114 | def __iter__(self): |
|
114 | def __iter__(self): | |
115 | for i in xrange(len(self)): |
|
115 | for i in xrange(len(self)): | |
116 | yield i |
|
116 | yield i | |
117 |
|
117 | |||
118 | def url(self): |
|
118 | def url(self): | |
119 | return 'file:' + self.root |
|
119 | return 'file:' + self.root | |
120 |
|
120 | |||
121 | def hook(self, name, throw=False, **args): |
|
121 | def hook(self, name, throw=False, **args): | |
122 | return hook.hook(self.ui, self, name, throw, **args) |
|
122 | return hook.hook(self.ui, self, name, throw, **args) | |
123 |
|
123 | |||
124 | tag_disallowed = ':\r\n' |
|
124 | tag_disallowed = ':\r\n' | |
125 |
|
125 | |||
126 | def _tag(self, names, node, message, local, user, date, parent=None, |
|
126 | def _tag(self, names, node, message, local, user, date, parent=None, | |
127 | extra={}): |
|
127 | extra={}): | |
128 | use_dirstate = parent is None |
|
128 | use_dirstate = parent is None | |
129 |
|
129 | |||
130 | if isinstance(names, str): |
|
130 | if isinstance(names, str): | |
131 | allchars = names |
|
131 | allchars = names | |
132 | names = (names,) |
|
132 | names = (names,) | |
133 | else: |
|
133 | else: | |
134 | allchars = ''.join(names) |
|
134 | allchars = ''.join(names) | |
135 | for c in self.tag_disallowed: |
|
135 | for c in self.tag_disallowed: | |
136 | if c in allchars: |
|
136 | if c in allchars: | |
137 | raise util.Abort(_('%r cannot be used in a tag name') % c) |
|
137 | raise util.Abort(_('%r cannot be used in a tag name') % c) | |
138 |
|
138 | |||
139 | for name in names: |
|
139 | for name in names: | |
140 | self.hook('pretag', throw=True, node=hex(node), tag=name, |
|
140 | self.hook('pretag', throw=True, node=hex(node), tag=name, | |
141 | local=local) |
|
141 | local=local) | |
142 |
|
142 | |||
143 | def writetags(fp, names, munge, prevtags): |
|
143 | def writetags(fp, names, munge, prevtags): | |
144 | fp.seek(0, 2) |
|
144 | fp.seek(0, 2) | |
145 | if prevtags and prevtags[-1] != '\n': |
|
145 | if prevtags and prevtags[-1] != '\n': | |
146 | fp.write('\n') |
|
146 | fp.write('\n') | |
147 | for name in names: |
|
147 | for name in names: | |
148 | m = munge and munge(name) or name |
|
148 | m = munge and munge(name) or name | |
149 | if self._tagstypecache and name in self._tagstypecache: |
|
149 | if self._tagstypecache and name in self._tagstypecache: | |
150 | old = self.tagscache.get(name, nullid) |
|
150 | old = self.tagscache.get(name, nullid) | |
151 | fp.write('%s %s\n' % (hex(old), m)) |
|
151 | fp.write('%s %s\n' % (hex(old), m)) | |
152 | fp.write('%s %s\n' % (hex(node), m)) |
|
152 | fp.write('%s %s\n' % (hex(node), m)) | |
153 | fp.close() |
|
153 | fp.close() | |
154 |
|
154 | |||
155 | prevtags = '' |
|
155 | prevtags = '' | |
156 | if local: |
|
156 | if local: | |
157 | try: |
|
157 | try: | |
158 | fp = self.opener('localtags', 'r+') |
|
158 | fp = self.opener('localtags', 'r+') | |
159 | except IOError, err: |
|
159 | except IOError, err: | |
160 | fp = self.opener('localtags', 'a') |
|
160 | fp = self.opener('localtags', 'a') | |
161 | else: |
|
161 | else: | |
162 | prevtags = fp.read() |
|
162 | prevtags = fp.read() | |
163 |
|
163 | |||
164 | # local tags are stored in the current charset |
|
164 | # local tags are stored in the current charset | |
165 | writetags(fp, names, None, prevtags) |
|
165 | writetags(fp, names, None, prevtags) | |
166 | for name in names: |
|
166 | for name in names: | |
167 | self.hook('tag', node=hex(node), tag=name, local=local) |
|
167 | self.hook('tag', node=hex(node), tag=name, local=local) | |
168 | return |
|
168 | return | |
169 |
|
169 | |||
170 | if use_dirstate: |
|
170 | if use_dirstate: | |
171 | try: |
|
171 | try: | |
172 | fp = self.wfile('.hgtags', 'rb+') |
|
172 | fp = self.wfile('.hgtags', 'rb+') | |
173 | except IOError, err: |
|
173 | except IOError, err: | |
174 | fp = self.wfile('.hgtags', 'ab') |
|
174 | fp = self.wfile('.hgtags', 'ab') | |
175 | else: |
|
175 | else: | |
176 | prevtags = fp.read() |
|
176 | prevtags = fp.read() | |
177 | else: |
|
177 | else: | |
178 | try: |
|
178 | try: | |
179 | prevtags = self.filectx('.hgtags', parent).data() |
|
179 | prevtags = self.filectx('.hgtags', parent).data() | |
180 | except revlog.LookupError: |
|
180 | except revlog.LookupError: | |
181 | pass |
|
181 | pass | |
182 | fp = self.wfile('.hgtags', 'wb') |
|
182 | fp = self.wfile('.hgtags', 'wb') | |
183 | if prevtags: |
|
183 | if prevtags: | |
184 | fp.write(prevtags) |
|
184 | fp.write(prevtags) | |
185 |
|
185 | |||
186 | # committed tags are stored in UTF-8 |
|
186 | # committed tags are stored in UTF-8 | |
187 | writetags(fp, names, util.fromlocal, prevtags) |
|
187 | writetags(fp, names, util.fromlocal, prevtags) | |
188 |
|
188 | |||
189 | if use_dirstate and '.hgtags' not in self.dirstate: |
|
189 | if use_dirstate and '.hgtags' not in self.dirstate: | |
190 | self.add(['.hgtags']) |
|
190 | self.add(['.hgtags']) | |
191 |
|
191 | |||
192 | tagnode = self.commit(['.hgtags'], message, user, date, p1=parent, |
|
192 | tagnode = self.commit(['.hgtags'], message, user, date, p1=parent, | |
193 | extra=extra) |
|
193 | extra=extra) | |
194 |
|
194 | |||
195 | for name in names: |
|
195 | for name in names: | |
196 | self.hook('tag', node=hex(node), tag=name, local=local) |
|
196 | self.hook('tag', node=hex(node), tag=name, local=local) | |
197 |
|
197 | |||
198 | return tagnode |
|
198 | return tagnode | |
199 |
|
199 | |||
200 | def tag(self, names, node, message, local, user, date): |
|
200 | def tag(self, names, node, message, local, user, date): | |
201 | '''tag a revision with one or more symbolic names. |
|
201 | '''tag a revision with one or more symbolic names. | |
202 |
|
202 | |||
203 | names is a list of strings or, when adding a single tag, names may be a |
|
203 | names is a list of strings or, when adding a single tag, names may be a | |
204 | string. |
|
204 | string. | |
205 |
|
205 | |||
206 | if local is True, the tags are stored in a per-repository file. |
|
206 | if local is True, the tags are stored in a per-repository file. | |
207 | otherwise, they are stored in the .hgtags file, and a new |
|
207 | otherwise, they are stored in the .hgtags file, and a new | |
208 | changeset is committed with the change. |
|
208 | changeset is committed with the change. | |
209 |
|
209 | |||
210 | keyword arguments: |
|
210 | keyword arguments: | |
211 |
|
211 | |||
212 | local: whether to store tags in non-version-controlled file |
|
212 | local: whether to store tags in non-version-controlled file | |
213 | (default False) |
|
213 | (default False) | |
214 |
|
214 | |||
215 | message: commit message to use if committing |
|
215 | message: commit message to use if committing | |
216 |
|
216 | |||
217 | user: name of user to use if committing |
|
217 | user: name of user to use if committing | |
218 |
|
218 | |||
219 | date: date tuple to use if committing''' |
|
219 | date: date tuple to use if committing''' | |
220 |
|
220 | |||
221 | for x in self.status()[:5]: |
|
221 | for x in self.status()[:5]: | |
222 | if '.hgtags' in x: |
|
222 | if '.hgtags' in x: | |
223 | raise util.Abort(_('working copy of .hgtags is changed ' |
|
223 | raise util.Abort(_('working copy of .hgtags is changed ' | |
224 | '(please commit .hgtags manually)')) |
|
224 | '(please commit .hgtags manually)')) | |
225 |
|
225 | |||
226 | self._tag(names, node, message, local, user, date) |
|
226 | self._tag(names, node, message, local, user, date) | |
227 |
|
227 | |||
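As the tag() docstring says, names may be a single string or a list; _tag() (earlier in this file) normalizes that and rejects any name containing a character from tag_disallowed (':', carriage return, newline). A standalone sketch of that normalization and check, with the helper name invented for illustration:

    def checktagnames(names, disallowed=':\r\n'):
        """Mirror the normalization at the top of _tag(): accept one tag
        as a str or several as a sequence, and refuse any name that
        contains a disallowed character."""
        if isinstance(names, str):
            allchars, names = names, (names,)
        else:
            allchars = ''.join(names)
        for c in disallowed:
            if c in allchars:
                raise ValueError('%r cannot be used in a tag name' % c)
        return list(names)

    assert checktagnames('v1.0') == ['v1.0']
    assert checktagnames(['v1.0', 'stable']) == ['v1.0', 'stable']
    try:
        checktagnames('bad:tag')
    except ValueError:
        pass                              # rejected, as _tag() would abort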
228 | def tags(self): |
|
228 | def tags(self): | |
229 | '''return a mapping of tag to node''' |
|
229 | '''return a mapping of tag to node''' | |
230 | if self.tagscache: |
|
230 | if self.tagscache: | |
231 | return self.tagscache |
|
231 | return self.tagscache | |
232 |
|
232 | |||
233 | globaltags = {} |
|
233 | globaltags = {} | |
234 | tagtypes = {} |
|
234 | tagtypes = {} | |
235 |
|
235 | |||
236 | def readtags(lines, fn, tagtype): |
|
236 | def readtags(lines, fn, tagtype): | |
237 | filetags = {} |
|
237 | filetags = {} | |
238 | count = 0 |
|
238 | count = 0 | |
239 |
|
239 | |||
240 | def warn(msg): |
|
240 | def warn(msg): | |
241 | self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg)) |
|
241 | self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg)) | |
242 |
|
242 | |||
243 | for l in lines: |
|
243 | for l in lines: | |
244 | count += 1 |
|
244 | count += 1 | |
245 | if not l: |
|
245 | if not l: | |
246 | continue |
|
246 | continue | |
247 | s = l.split(" ", 1) |
|
247 | s = l.split(" ", 1) | |
248 | if len(s) != 2: |
|
248 | if len(s) != 2: | |
249 | warn(_("cannot parse entry")) |
|
249 | warn(_("cannot parse entry")) | |
250 | continue |
|
250 | continue | |
251 | node, key = s |
|
251 | node, key = s | |
252 | key = util.tolocal(key.strip()) # stored in UTF-8 |
|
252 | key = util.tolocal(key.strip()) # stored in UTF-8 | |
253 | try: |
|
253 | try: | |
254 | bin_n = bin(node) |
|
254 | bin_n = bin(node) | |
255 | except TypeError: |
|
255 | except TypeError: | |
256 | warn(_("node '%s' is not well formed") % node) |
|
256 | warn(_("node '%s' is not well formed") % node) | |
257 | continue |
|
257 | continue | |
258 | if bin_n not in self.changelog.nodemap: |
|
258 | if bin_n not in self.changelog.nodemap: | |
259 | warn(_("tag '%s' refers to unknown node") % key) |
|
259 | warn(_("tag '%s' refers to unknown node") % key) | |
260 | continue |
|
260 | continue | |
261 |
|
261 | |||
262 | h = [] |
|
262 | h = [] | |
263 | if key in filetags: |
|
263 | if key in filetags: | |
264 | n, h = filetags[key] |
|
264 | n, h = filetags[key] | |
265 | h.append(n) |
|
265 | h.append(n) | |
266 | filetags[key] = (bin_n, h) |
|
266 | filetags[key] = (bin_n, h) | |
267 |
|
267 | |||
268 | for k, nh in filetags.items(): |
|
268 | for k, nh in filetags.items(): | |
269 | if k not in globaltags: |
|
269 | if k not in globaltags: | |
270 | globaltags[k] = nh |
|
270 | globaltags[k] = nh | |
271 | tagtypes[k] = tagtype |
|
271 | tagtypes[k] = tagtype | |
272 | continue |
|
272 | continue | |
273 |
|
273 | |||
274 | # we prefer the global tag if: |
|
274 | # we prefer the global tag if: | |
275 | # it supersedes us OR |
|
275 | # it supersedes us OR | |
276 | # mutual supersedes and it has a higher rank |
|
276 | # mutual supersedes and it has a higher rank | |
277 | # otherwise we win because we're tip-most |
|
277 | # otherwise we win because we're tip-most | |
278 | an, ah = nh |
|
278 | an, ah = nh | |
279 | bn, bh = globaltags[k] |
|
279 | bn, bh = globaltags[k] | |
280 | if (bn != an and an in bh and |
|
280 | if (bn != an and an in bh and | |
281 | (bn not in ah or len(bh) > len(ah))): |
|
281 | (bn not in ah or len(bh) > len(ah))): | |
282 | an = bn |
|
282 | an = bn | |
283 | ah.extend([n for n in bh if n not in ah]) |
|
283 | ah.extend([n for n in bh if n not in ah]) | |
284 | globaltags[k] = an, ah |
|
284 | globaltags[k] = an, ah | |
285 | tagtypes[k] = tagtype |
|
285 | tagtypes[k] = tagtype | |
286 |
|
286 | |||
287 | # read the tags file from each head, ending with the tip |
|
287 | # read the tags file from each head, ending with the tip | |
288 | f = None |
|
288 | f = None | |
289 | for rev, node, fnode in self._hgtagsnodes(): |
|
289 | for rev, node, fnode in self._hgtagsnodes(): | |
290 | f = (f and f.filectx(fnode) or |
|
290 | f = (f and f.filectx(fnode) or | |
291 | self.filectx('.hgtags', fileid=fnode)) |
|
291 | self.filectx('.hgtags', fileid=fnode)) | |
292 | readtags(f.data().splitlines(), f, "global") |
|
292 | readtags(f.data().splitlines(), f, "global") | |
293 |
|
293 | |||
294 | try: |
|
294 | try: | |
295 | data = util.fromlocal(self.opener("localtags").read()) |
|
295 | data = util.fromlocal(self.opener("localtags").read()) | |
296 | # localtags are stored in the local character set |
|
296 | # localtags are stored in the local character set | |
297 | # while the internal tag table is stored in UTF-8 |
|
297 | # while the internal tag table is stored in UTF-8 | |
298 | readtags(data.splitlines(), "localtags", "local") |
|
298 | readtags(data.splitlines(), "localtags", "local") | |
299 | except IOError: |
|
299 | except IOError: | |
300 | pass |
|
300 | pass | |
301 |
|
301 | |||
302 | self.tagscache = {} |
|
302 | self.tagscache = {} | |
303 | self._tagstypecache = {} |
|
303 | self._tagstypecache = {} | |
304 | for k,nh in globaltags.items(): |
|
304 | for k,nh in globaltags.items(): | |
305 | n = nh[0] |
|
305 | n = nh[0] | |
306 | if n != nullid: |
|
306 | if n != nullid: | |
307 | self.tagscache[k] = n |
|
307 | self.tagscache[k] = n | |
308 | self._tagstypecache[k] = tagtypes[k] |
|
308 | self._tagstypecache[k] = tagtypes[k] | |
309 | self.tagscache['tip'] = self.changelog.tip() |
|
309 | self.tagscache['tip'] = self.changelog.tip() | |
310 | return self.tagscache |
|
310 | return self.tagscache | |
311 |
|
311 | |||
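readtags() above parses one entry per line in the form "<hex node> <tag name>", warning (rather than aborting) on lines it cannot parse, on malformed nodes, and on nodes missing from the changelog; within one file a later entry for the same name shadows earlier ones, and across heads the global/local precedence follows the supersession comments above. A sketch of just the line parsing; the explicit hex check below stands in roughly for what bin() plus the nodemap lookup enforce:

    def parsetagline(line):
        """Parse one '<hexnode> <name>' entry the way readtags() does,
        returning (node, name) or None for lines it would warn about."""
        if not line:
            return None
        parts = line.split(' ', 1)
        if len(parts) != 2:
            return None                     # "cannot parse entry"
        node, name = parts[0], parts[1].strip()
        if len(node) != 40 or any(c not in '0123456789abcdef' for c in node):
            return None                     # "node ... is not well formed"
        return node, name

    assert parsetagline('1' * 40 + ' v1.0') == ('1' * 40, 'v1.0')
    assert parsetagline('garbage') is None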
312 | def tagtype(self, tagname): |
|
312 | def tagtype(self, tagname): | |
313 | ''' |
|
313 | ''' | |
314 | return the type of the given tag. result can be: |
|
314 | return the type of the given tag. result can be: | |
315 |
|
315 | |||
316 | 'local' : a local tag |
|
316 | 'local' : a local tag | |
317 | 'global' : a global tag |
|
317 | 'global' : a global tag | |
318 | None : tag does not exist |
|
318 | None : tag does not exist | |
319 | ''' |
|
319 | ''' | |
320 |
|
320 | |||
321 | self.tags() |
|
321 | self.tags() | |
322 |
|
322 | |||
323 | return self._tagstypecache.get(tagname) |
|
323 | return self._tagstypecache.get(tagname) | |
324 |
|
324 | |||
325 | def _hgtagsnodes(self): |
|
325 | def _hgtagsnodes(self): | |
326 | heads = self.heads() |
|
326 | heads = self.heads() | |
327 | heads.reverse() |
|
327 | heads.reverse() | |
328 | last = {} |
|
328 | last = {} | |
329 | ret = [] |
|
329 | ret = [] | |
330 | for node in heads: |
|
330 | for node in heads: | |
331 | c = self[node] |
|
331 | c = self[node] | |
332 | rev = c.rev() |
|
332 | rev = c.rev() | |
333 | try: |
|
333 | try: | |
334 | fnode = c.filenode('.hgtags') |
|
334 | fnode = c.filenode('.hgtags') | |
335 | except revlog.LookupError: |
|
335 | except revlog.LookupError: | |
336 | continue |
|
336 | continue | |
337 | ret.append((rev, node, fnode)) |
|
337 | ret.append((rev, node, fnode)) | |
338 | if fnode in last: |
|
338 | if fnode in last: | |
339 | ret[last[fnode]] = None |
|
339 | ret[last[fnode]] = None | |
340 | last[fnode] = len(ret) - 1 |
|
340 | last[fnode] = len(ret) - 1 | |
341 | return [item for item in ret if item] |
|
341 | return [item for item in ret if item] | |
342 |
|
342 | |||
343 | def tagslist(self): |
|
343 | def tagslist(self): | |
344 | '''return a list of tags ordered by revision''' |
|
344 | '''return a list of tags ordered by revision''' | |
345 | l = [] |
|
345 | l = [] | |
346 | for t, n in self.tags().items(): |
|
346 | for t, n in self.tags().items(): | |
347 | try: |
|
347 | try: | |
348 | r = self.changelog.rev(n) |
|
348 | r = self.changelog.rev(n) | |
349 | except: |
|
349 | except: | |
350 | r = -2 # sort to the beginning of the list if unknown |
|
350 | r = -2 # sort to the beginning of the list if unknown | |
351 | l.append((r, t, n)) |
|
351 | l.append((r, t, n)) | |
352 | return [(t, n) for r, t, n in util.sort(l)] |
|
352 | return [(t, n) for r, t, n in util.sort(l)] | |
353 |
|
353 | |||
354 | def nodetags(self, node): |
|
354 | def nodetags(self, node): | |
355 | '''return the tags associated with a node''' |
|
355 | '''return the tags associated with a node''' | |
356 | if not self.nodetagscache: |
|
356 | if not self.nodetagscache: | |
357 | self.nodetagscache = {} |
|
357 | self.nodetagscache = {} | |
358 | for t, n in self.tags().items(): |
|
358 | for t, n in self.tags().items(): | |
359 | self.nodetagscache.setdefault(n, []).append(t) |
|
359 | self.nodetagscache.setdefault(n, []).append(t) | |
360 | return self.nodetagscache.get(node, []) |
|
360 | return self.nodetagscache.get(node, []) | |
361 |
|
361 | |||
362 | def _branchtags(self, partial, lrev): |
|
362 | def _branchtags(self, partial, lrev): | |
363 | tiprev = len(self) - 1 |
|
363 | tiprev = len(self) - 1 | |
364 | if lrev != tiprev: |
|
364 | if lrev != tiprev: | |
365 | self._updatebranchcache(partial, lrev+1, tiprev+1) |
|
365 | self._updatebranchcache(partial, lrev+1, tiprev+1) | |
366 | self._writebranchcache(partial, self.changelog.tip(), tiprev) |
|
366 | self._writebranchcache(partial, self.changelog.tip(), tiprev) | |
367 |
|
367 | |||
368 | return partial |
|
368 | return partial | |
369 |
|
369 | |||
370 | def branchtags(self): |
|
370 | def branchtags(self): | |
371 | tip = self.changelog.tip() |
|
371 | tip = self.changelog.tip() | |
372 | if self.branchcache is not None and self._branchcachetip == tip: |
|
372 | if self.branchcache is not None and self._branchcachetip == tip: | |
373 | return self.branchcache |
|
373 | return self.branchcache | |
374 |
|
374 | |||
375 | oldtip = self._branchcachetip |
|
375 | oldtip = self._branchcachetip | |
376 | self._branchcachetip = tip |
|
376 | self._branchcachetip = tip | |
377 | if self.branchcache is None: |
|
377 | if self.branchcache is None: | |
378 | self.branchcache = {} # avoid recursion in changectx |
|
378 | self.branchcache = {} # avoid recursion in changectx | |
379 | else: |
|
379 | else: | |
380 | self.branchcache.clear() # keep using the same dict |
|
380 | self.branchcache.clear() # keep using the same dict | |
381 | if oldtip is None or oldtip not in self.changelog.nodemap: |
|
381 | if oldtip is None or oldtip not in self.changelog.nodemap: | |
382 | partial, last, lrev = self._readbranchcache() |
|
382 | partial, last, lrev = self._readbranchcache() | |
383 | else: |
|
383 | else: | |
384 | lrev = self.changelog.rev(oldtip) |
|
384 | lrev = self.changelog.rev(oldtip) | |
385 | partial = self._ubranchcache |
|
385 | partial = self._ubranchcache | |
386 |
|
386 | |||
387 | self._branchtags(partial, lrev) |
|
387 | self._branchtags(partial, lrev) | |
388 |
|
388 | |||
389 | # the branch cache is stored on disk as UTF-8, but in the local |
|
389 | # the branch cache is stored on disk as UTF-8, but in the local | |
390 | # charset internally |
|
390 | # charset internally | |
391 | for k, v in partial.items(): |
|
391 | for k, v in partial.items(): | |
392 | self.branchcache[util.tolocal(k)] = v |
|
392 | self.branchcache[util.tolocal(k)] = v | |
393 | self._ubranchcache = partial |
|
393 | self._ubranchcache = partial | |
394 | return self.branchcache |
|
394 | return self.branchcache | |
395 |
|
395 | |||
396 | def _readbranchcache(self): |
|
396 | def _readbranchcache(self): | |
397 | partial = {} |
|
397 | partial = {} | |
398 | try: |
|
398 | try: | |
399 | f = self.opener("branch.cache") |
|
399 | f = self.opener("branch.cache") | |
400 | lines = f.read().split('\n') |
|
400 | lines = f.read().split('\n') | |
401 | f.close() |
|
401 | f.close() | |
402 | except (IOError, OSError): |
|
402 | except (IOError, OSError): | |
403 | return {}, nullid, nullrev |
|
403 | return {}, nullid, nullrev | |
404 |
|
404 | |||
405 | try: |
|
405 | try: | |
406 | last, lrev = lines.pop(0).split(" ", 1) |
|
406 | last, lrev = lines.pop(0).split(" ", 1) | |
407 | last, lrev = bin(last), int(lrev) |
|
407 | last, lrev = bin(last), int(lrev) | |
408 | if lrev >= len(self) or self[lrev].node() != last: |
|
408 | if lrev >= len(self) or self[lrev].node() != last: | |
409 | # invalidate the cache |
|
409 | # invalidate the cache | |
410 | raise ValueError('invalidating branch cache (tip differs)') |
|
410 | raise ValueError('invalidating branch cache (tip differs)') | |
411 | for l in lines: |
|
411 | for l in lines: | |
412 | if not l: continue |
|
412 | if not l: continue | |
413 | node, label = l.split(" ", 1) |
|
413 | node, label = l.split(" ", 1) | |
414 | partial[label.strip()] = bin(node) |
|
414 | partial[label.strip()] = bin(node) | |
415 | except (KeyboardInterrupt, util.SignalInterrupt): |
|
415 | except (KeyboardInterrupt, util.SignalInterrupt): | |
416 | raise |
|
416 | raise | |
417 | except Exception, inst: |
|
417 | except Exception, inst: | |
418 | if self.ui.debugflag: |
|
418 | if self.ui.debugflag: | |
419 | self.ui.warn(str(inst), '\n') |
|
419 | self.ui.warn(str(inst), '\n') | |
420 | partial, last, lrev = {}, nullid, nullrev |
|
420 | partial, last, lrev = {}, nullid, nullrev | |
421 | return partial, last, lrev |
|
421 | return partial, last, lrev | |
422 |
|
422 | |||
423 | def _writebranchcache(self, branches, tip, tiprev): |
|
423 | def _writebranchcache(self, branches, tip, tiprev): | |
424 | try: |
|
424 | try: | |
425 | f = self.opener("branch.cache", "w", atomictemp=True) |
|
425 | f = self.opener("branch.cache", "w", atomictemp=True) | |
426 | f.write("%s %s\n" % (hex(tip), tiprev)) |
|
426 | f.write("%s %s\n" % (hex(tip), tiprev)) | |
427 | for label, node in branches.iteritems(): |
|
427 | for label, node in branches.iteritems(): | |
428 | f.write("%s %s\n" % (hex(node), label)) |
|
428 | f.write("%s %s\n" % (hex(node), label)) | |
429 | f.rename() |
|
429 | f.rename() | |
430 | except (IOError, OSError): |
|
430 | except (IOError, OSError): | |
431 | pass |
|
431 | pass | |
432 |
|
432 | |||
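_readbranchcache() and _writebranchcache() round-trip a small text format: the first line of branch.cache is "<tip hex> <tip rev>", every following line is "<node hex> <branch label>", and the whole cache is discarded if the recorded tip no longer matches the changelog. A sketch of that round trip using plain strings in place of the real opener and bin()/hex() conversions:

    def writebranchcache(branches, tip, tiprev):
        """Serialize the cache the way _writebranchcache() lays it out."""
        lines = ['%s %s' % (tip, tiprev)]
        for label, node in sorted(branches.items()):
            lines.append('%s %s' % (node, label))
        return '\n'.join(lines) + '\n'

    def readbranchcache(text):
        """Parse it back, returning (partial, last, lrev)."""
        lines = text.split('\n')
        last, lrev = lines.pop(0).split(' ', 1)
        lrev = int(lrev)
        partial = {}
        for l in lines:
            if not l:
                continue
            node, label = l.split(' ', 1)
            partial[label.strip()] = node
        return partial, last, lrev

    tiphex = 'f' * 40
    text = writebranchcache({'default': tiphex}, tiphex, 7)
    assert readbranchcache(text) == ({'default': tiphex}, tiphex, 7)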
433 | def _updatebranchcache(self, partial, start, end): |
|
433 | def _updatebranchcache(self, partial, start, end): | |
434 | for r in xrange(start, end): |
|
434 | for r in xrange(start, end): | |
435 | c = self[r] |
|
435 | c = self[r] | |
436 | b = c.branch() |
|
436 | b = c.branch() | |
437 | partial[b] = c.node() |
|
437 | partial[b] = c.node() | |
438 |
|
438 | |||
439 | def lookup(self, key): |
|
439 | def lookup(self, key): | |
440 | if isinstance(key, int): |
|
440 | if isinstance(key, int): | |
441 | return self.changelog.node(key) |
|
441 | return self.changelog.node(key) | |
442 | elif key == '.': |
|
442 | elif key == '.': | |
443 | return self.dirstate.parents()[0] |
|
443 | return self.dirstate.parents()[0] | |
444 | elif key == 'null': |
|
444 | elif key == 'null': | |
445 | return nullid |
|
445 | return nullid | |
446 | elif key == 'tip': |
|
446 | elif key == 'tip': | |
447 | return self.changelog.tip() |
|
447 | return self.changelog.tip() | |
448 | n = self.changelog._match(key) |
|
448 | n = self.changelog._match(key) | |
449 | if n: |
|
449 | if n: | |
450 | return n |
|
450 | return n | |
451 | if key in self.tags(): |
|
451 | if key in self.tags(): | |
452 | return self.tags()[key] |
|
452 | return self.tags()[key] | |
453 | if key in self.branchtags(): |
|
453 | if key in self.branchtags(): | |
454 | return self.branchtags()[key] |
|
454 | return self.branchtags()[key] | |
455 | n = self.changelog._partialmatch(key) |
|
455 | n = self.changelog._partialmatch(key) | |
456 | if n: |
|
456 | if n: | |
457 | return n |
|
457 | return n | |
458 | try: |
|
458 | try: | |
459 | if len(key) == 20: |
|
459 | if len(key) == 20: | |
460 | key = hex(key) |
|
460 | key = hex(key) | |
461 | except: |
|
461 | except: | |
462 | pass |
|
462 | pass | |
463 | raise repo.RepoError(_("unknown revision '%s'") % key) |
|
463 | raise repo.RepoError(_("unknown revision '%s'") % key) | |
464 |
|
464 | |||
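lookup() above resolves a key by trying, in order: an integer revision, the special names '.', 'null' and 'tip', an exact changelog match, tags, branch heads, then an unambiguous prefix, and finally raises RepoError. A trimmed-down resolver in the same spirit, run against invented dict/list stand-ins rather than a real changelog (it omits the '.' and 'null' cases):

    def lookup(key, tags, branches, nodes):
        """Resolve `key` with roughly the precedence of
        localrepository.lookup(), over toy data structures."""
        if isinstance(key, int):
            return nodes[key]
        if key == 'tip':
            return nodes[-1]
        if key in nodes:                 # exact node
            return key
        if key in tags:
            return tags[key]
        if key in branches:
            return branches[key]
        matches = [n for n in nodes if n.startswith(key)]
        if len(matches) == 1:            # unambiguous prefix
            return matches[0]
        raise LookupError("unknown revision '%s'" % key)

    nodes = ['aa11', 'bb22', 'cc33']
    tags = {'v1.0': 'bb22'}
    branches = {'default': 'cc33'}
    assert lookup(1, tags, branches, nodes) == 'bb22'
    assert lookup('v1.0', tags, branches, nodes) == 'bb22'
    assert lookup('cc', tags, branches, nodes) == 'cc33'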
465 | def local(self): |
|
465 | def local(self): | |
466 | return True |
|
466 | return True | |
467 |
|
467 | |||
468 | def join(self, f): |
|
468 | def join(self, f): | |
469 | return os.path.join(self.path, f) |
|
469 | return os.path.join(self.path, f) | |
470 |
|
470 | |||
471 | def wjoin(self, f): |
|
471 | def wjoin(self, f): | |
472 | return os.path.join(self.root, f) |
|
472 | return os.path.join(self.root, f) | |
473 |
|
473 | |||
474 | def rjoin(self, f): |
|
474 | def rjoin(self, f): | |
475 | return os.path.join(self.root, util.pconvert(f)) |
|
475 | return os.path.join(self.root, util.pconvert(f)) | |
476 |
|
476 | |||
477 | def file(self, f): |
|
477 | def file(self, f): | |
478 | if f[0] == '/': |
|
478 | if f[0] == '/': | |
479 | f = f[1:] |
|
479 | f = f[1:] | |
480 | return filelog.filelog(self.sopener, f) |
|
480 | return filelog.filelog(self.sopener, f) | |
481 |
|
481 | |||
482 | def changectx(self, changeid): |
|
482 | def changectx(self, changeid): | |
483 | return self[changeid] |
|
483 | return self[changeid] | |
484 |
|
484 | |||
485 | def parents(self, changeid=None): |
|
485 | def parents(self, changeid=None): | |
486 | '''get list of changectxs for parents of changeid''' |
|
486 | '''get list of changectxs for parents of changeid''' | |
487 | return self[changeid].parents() |
|
487 | return self[changeid].parents() | |
488 |
|
488 | |||
489 | def filectx(self, path, changeid=None, fileid=None): |
|
489 | def filectx(self, path, changeid=None, fileid=None): | |
490 | """changeid can be a changeset revision, node, or tag. |
|
490 | """changeid can be a changeset revision, node, or tag. | |
491 | fileid can be a file revision or node.""" |
|
491 | fileid can be a file revision or node.""" | |
492 | return context.filectx(self, path, changeid, fileid) |
|
492 | return context.filectx(self, path, changeid, fileid) | |
493 |
|
493 | |||
494 | def getcwd(self): |
|
494 | def getcwd(self): | |
495 | return self.dirstate.getcwd() |
|
495 | return self.dirstate.getcwd() | |
496 |
|
496 | |||
497 | def pathto(self, f, cwd=None): |
|
497 | def pathto(self, f, cwd=None): | |
498 | return self.dirstate.pathto(f, cwd) |
|
498 | return self.dirstate.pathto(f, cwd) | |
499 |
|
499 | |||
500 | def wfile(self, f, mode='r'): |
|
500 | def wfile(self, f, mode='r'): | |
501 | return self.wopener(f, mode) |
|
501 | return self.wopener(f, mode) | |
502 |
|
502 | |||
503 | def _link(self, f): |
|
503 | def _link(self, f): | |
504 | return os.path.islink(self.wjoin(f)) |
|
504 | return os.path.islink(self.wjoin(f)) | |
505 |
|
505 | |||
506 | def _filter(self, filter, filename, data): |
|
506 | def _filter(self, filter, filename, data): | |
507 | if filter not in self.filterpats: |
|
507 | if filter not in self.filterpats: | |
508 | l = [] |
|
508 | l = [] | |
509 | for pat, cmd in self.ui.configitems(filter): |
|
509 | for pat, cmd in self.ui.configitems(filter): | |
510 | if cmd == '!': |
|
510 | if cmd == '!': | |
511 | continue |
|
511 | continue | |
512 | mf = util.matcher(self.root, "", [pat], [], [])[1] |
|
512 | mf = util.matcher(self.root, "", [pat], [], [])[1] | |
513 | fn = None |
|
513 | fn = None | |
514 | params = cmd |
|
514 | params = cmd | |
515 | for name, filterfn in self._datafilters.iteritems(): |
|
515 | for name, filterfn in self._datafilters.iteritems(): | |
516 | if cmd.startswith(name): |
|
516 | if cmd.startswith(name): | |
517 | fn = filterfn |
|
517 | fn = filterfn | |
518 | params = cmd[len(name):].lstrip() |
|
518 | params = cmd[len(name):].lstrip() | |
519 | break |
|
519 | break | |
520 | if not fn: |
|
520 | if not fn: | |
521 | fn = lambda s, c, **kwargs: util.filter(s, c) |
|
521 | fn = lambda s, c, **kwargs: util.filter(s, c) | |
522 | # Wrap old filters not supporting keyword arguments |
|
522 | # Wrap old filters not supporting keyword arguments | |
523 | if not inspect.getargspec(fn)[2]: |
|
523 | if not inspect.getargspec(fn)[2]: | |
524 | oldfn = fn |
|
524 | oldfn = fn | |
525 | fn = lambda s, c, **kwargs: oldfn(s, c) |
|
525 | fn = lambda s, c, **kwargs: oldfn(s, c) | |
526 | l.append((mf, fn, params)) |
|
526 | l.append((mf, fn, params)) | |
527 | self.filterpats[filter] = l |
|
527 | self.filterpats[filter] = l | |
528 |
|
528 | |||
529 | for mf, fn, cmd in self.filterpats[filter]: |
|
529 | for mf, fn, cmd in self.filterpats[filter]: | |
530 | if mf(filename): |
|
530 | if mf(filename): | |
531 | self.ui.debug(_("filtering %s through %s\n") % (filename, cmd)) |
|
531 | self.ui.debug(_("filtering %s through %s\n") % (filename, cmd)) | |
532 | data = fn(data, cmd, ui=self.ui, repo=self, filename=filename) |
|
532 | data = fn(data, cmd, ui=self.ui, repo=self, filename=filename) | |
533 | break |
|
533 | break | |
534 |
|
534 | |||
535 | return data |
|
535 | return data | |
536 |
|
536 | |||
537 | def adddatafilter(self, name, filter): |
|
537 | def adddatafilter(self, name, filter): | |
538 | self._datafilters[name] = filter |
|
538 | self._datafilters[name] = filter | |
539 |
|
539 | |||
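_filter() above lazily compiles the [encode]/[decode] configuration into (matcher, function, params) triples: commands starting with a registered data-filter name (added via adddatafilter) dispatch to that function, anything else falls back to a shell-command filter, and old-style filters that take no keyword arguments are wrapped so they can still be called with the newer ui/repo/filename keywords. That wrapping trick is small enough to show on its own; the helper names here are illustrative, and getfullargspec is used as a stand-in for getargspec on newer Pythons:

    import inspect

    _getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec

    def wrapfilter(fn):
        """Give a plain two-argument filter a **kwargs-tolerant signature,
        the way _filter() wraps old-style data filters."""
        if _getargspec(fn)[2]:           # index 2 is the **kwargs name (or None)
            return fn
        return lambda s, cmd, **kwargs: fn(s, cmd)

    def upperfilter(data, cmd):          # an "old" filter: no keyword arguments
        return data.upper()

    f = wrapfilter(upperfilter)
    assert f('abc', 'dummy', ui=None, repo=None, filename='x') == 'ABC'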
540 | def wread(self, filename): |
|
540 | def wread(self, filename): | |
541 | if self._link(filename): |
|
541 | if self._link(filename): | |
542 | data = os.readlink(self.wjoin(filename)) |
|
542 | data = os.readlink(self.wjoin(filename)) | |
543 | else: |
|
543 | else: | |
544 | data = self.wopener(filename, 'r').read() |
|
544 | data = self.wopener(filename, 'r').read() | |
545 | return self._filter("encode", filename, data) |
|
545 | return self._filter("encode", filename, data) | |
546 |
|
546 | |||
547 | def wwrite(self, filename, data, flags): |
|
547 | def wwrite(self, filename, data, flags): | |
548 | data = self._filter("decode", filename, data) |
|
548 | data = self._filter("decode", filename, data) | |
549 | try: |
|
549 | try: | |
550 | os.unlink(self.wjoin(filename)) |
|
550 | os.unlink(self.wjoin(filename)) | |
551 | except OSError: |
|
551 | except OSError: | |
552 | pass |
|
552 | pass | |
553 | if 'l' in flags: |
|
553 | if 'l' in flags: | |
554 | self.wopener.symlink(data, filename) |
|
554 | self.wopener.symlink(data, filename) | |
555 | else: |
|
555 | else: | |
556 | self.wopener(filename, 'w').write(data) |
|
556 | self.wopener(filename, 'w').write(data) | |
557 | if 'x' in flags: |
|
557 | if 'x' in flags: | |
558 | util.set_flags(self.wjoin(filename), False, True) |
|
558 | util.set_flags(self.wjoin(filename), False, True) | |
559 |
|
559 | |||
560 | def wwritedata(self, filename, data): |
|
560 | def wwritedata(self, filename, data): | |
561 | return self._filter("decode", filename, data) |
|
561 | return self._filter("decode", filename, data) | |
562 |
|
562 | |||
563 | def transaction(self): |
|
563 | def transaction(self): | |
564 | if self._transref and self._transref(): |
|
564 | if self._transref and self._transref(): | |
565 | return self._transref().nest() |
|
565 | return self._transref().nest() | |
566 |
|
566 | |||
567 | # abort here if the journal already exists |
|
567 | # abort here if the journal already exists | |
568 | if os.path.exists(self.sjoin("journal")): |
|
568 | if os.path.exists(self.sjoin("journal")): | |
569 | raise repo.RepoError(_("journal already exists - run hg recover")) |
|
569 | raise repo.RepoError(_("journal already exists - run hg recover")) | |
570 |
|
570 | |||
571 | # save dirstate for rollback |
|
571 | # save dirstate for rollback | |
572 | try: |
|
572 | try: | |
573 | ds = self.opener("dirstate").read() |
|
573 | ds = self.opener("dirstate").read() | |
574 | except IOError: |
|
574 | except IOError: | |
575 | ds = "" |
|
575 | ds = "" | |
576 | self.opener("journal.dirstate", "w").write(ds) |
|
576 | self.opener("journal.dirstate", "w").write(ds) | |
577 | self.opener("journal.branch", "w").write(self.dirstate.branch()) |
|
577 | self.opener("journal.branch", "w").write(self.dirstate.branch()) | |
578 |
|
578 | |||
579 | renames = [(self.sjoin("journal"), self.sjoin("undo")), |
|
579 | renames = [(self.sjoin("journal"), self.sjoin("undo")), | |
580 | (self.join("journal.dirstate"), self.join("undo.dirstate")), |
|
580 | (self.join("journal.dirstate"), self.join("undo.dirstate")), | |
581 | (self.join("journal.branch"), self.join("undo.branch"))] |
|
581 | (self.join("journal.branch"), self.join("undo.branch"))] | |
582 | tr = transaction.transaction(self.ui.warn, self.sopener, |
|
582 | tr = transaction.transaction(self.ui.warn, self.sopener, | |
583 | self.sjoin("journal"), |
|
583 | self.sjoin("journal"), | |
584 | aftertrans(renames), |
|
584 | aftertrans(renames), | |
585 | self.store.createmode) |
|
585 | self.store.createmode) | |
586 | self._transref = weakref.ref(tr) |
|
586 | self._transref = weakref.ref(tr) | |
587 | return tr |
|
587 | return tr | |
588 |
|
588 | |||
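transaction() above refuses to start when a journal already exists, snapshots the dirstate and branch into journal.* files, and registers aftertrans(renames) so that a completed transaction renames the journal files to undo.* — exactly the files rollback() later replays. aftertrans itself is defined elsewhere in this file; the sketch below is a guess at its shape, using os.rename directly instead of util.rename:

    import os

    def aftertrans(renames):
        """Return a callback that performs the queued renames once the
        transaction has successfully closed (illustrative sketch)."""
        pending = [tuple(pair) for pair in renames]
        def commitrenames():
            for src, dest in pending:
                os.rename(src, dest)
        return commitrenames

    # Usage sketch: queue journal -> undo renames, run them on success.
    # cb = aftertrans([('journal', 'undo'),
    #                  ('journal.dirstate', 'undo.dirstate')])
    # ... write journal files, run the transaction ...
    # cb()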
589 | def recover(self): |
|
589 | def recover(self): | |
590 | l = self.lock() |
|
590 | l = self.lock() | |
591 | try: |
|
591 | try: | |
592 | if os.path.exists(self.sjoin("journal")): |
|
592 | if os.path.exists(self.sjoin("journal")): | |
593 | self.ui.status(_("rolling back interrupted transaction\n")) |
|
593 | self.ui.status(_("rolling back interrupted transaction\n")) | |
594 | transaction.rollback(self.sopener, self.sjoin("journal")) |
|
594 | transaction.rollback(self.sopener, self.sjoin("journal")) | |
595 | self.invalidate() |
|
595 | self.invalidate() | |
596 | return True |
|
596 | return True | |
597 | else: |
|
597 | else: | |
598 | self.ui.warn(_("no interrupted transaction available\n")) |
|
598 | self.ui.warn(_("no interrupted transaction available\n")) | |
599 | return False |
|
599 | return False | |
600 | finally: |
|
600 | finally: | |
601 | del l |
|
601 | del l | |
602 |
|
602 | |||
603 | def rollback(self): |
|
603 | def rollback(self): | |
604 | wlock = lock = None |
|
604 | wlock = lock = None | |
605 | try: |
|
605 | try: | |
606 | wlock = self.wlock() |
|
606 | wlock = self.wlock() | |
607 | lock = self.lock() |
|
607 | lock = self.lock() | |
608 | if os.path.exists(self.sjoin("undo")): |
|
608 | if os.path.exists(self.sjoin("undo")): | |
609 | self.ui.status(_("rolling back last transaction\n")) |
|
609 | self.ui.status(_("rolling back last transaction\n")) | |
610 | transaction.rollback(self.sopener, self.sjoin("undo")) |
|
610 | transaction.rollback(self.sopener, self.sjoin("undo")) | |
611 | util.rename(self.join("undo.dirstate"), self.join("dirstate")) |
|
611 | util.rename(self.join("undo.dirstate"), self.join("dirstate")) | |
612 | try: |
|
612 | try: | |
613 | branch = self.opener("undo.branch").read() |
|
613 | branch = self.opener("undo.branch").read() | |
614 | self.dirstate.setbranch(branch) |
|
614 | self.dirstate.setbranch(branch) | |
615 | except IOError: |
|
615 | except IOError: | |
616 | self.ui.warn(_("Named branch could not be reset, " |
|
616 | self.ui.warn(_("Named branch could not be reset, " | |
617 | "current branch still is: %s\n") |
|
617 | "current branch still is: %s\n") | |
618 | % util.tolocal(self.dirstate.branch())) |
|
618 | % util.tolocal(self.dirstate.branch())) | |
619 | self.invalidate() |
|
619 | self.invalidate() | |
620 | self.dirstate.invalidate() |
|
620 | self.dirstate.invalidate() | |
621 | else: |
|
621 | else: | |
622 | self.ui.warn(_("no rollback information available\n")) |
|
622 | self.ui.warn(_("no rollback information available\n")) | |
623 | finally: |
|
623 | finally: | |
624 | del lock, wlock |
|
624 | del lock, wlock | |
625 |
|
625 | |||
626 | def invalidate(self): |
|
626 | def invalidate(self): | |
627 | for a in "changelog manifest".split(): |
|
627 | for a in "changelog manifest".split(): | |
628 | if a in self.__dict__: |
|
628 | if a in self.__dict__: | |
629 | delattr(self, a) |
|
629 | delattr(self, a) | |
630 | self.tagscache = None |
|
630 | self.tagscache = None | |
631 | self._tagstypecache = None |
|
631 | self._tagstypecache = None | |
632 | self.nodetagscache = None |
|
632 | self.nodetagscache = None | |
633 | self.branchcache = None |
|
633 | self.branchcache = None | |
634 | self._ubranchcache = None |
|
634 | self._ubranchcache = None | |
635 | self._branchcachetip = None |
|
635 | self._branchcachetip = None | |
636 |
|
636 | |||
637 | def _lock(self, lockname, wait, releasefn, acquirefn, desc): |
|
637 | def _lock(self, lockname, wait, releasefn, acquirefn, desc): | |
638 | try: |
|
638 | try: | |
639 | l = lock.lock(lockname, 0, releasefn, desc=desc) |
|
639 | l = lock.lock(lockname, 0, releasefn, desc=desc) | |
640 | except lock.LockHeld, inst: |
|
640 | except lock.LockHeld, inst: | |
641 | if not wait: |
|
641 | if not wait: | |
642 | raise |
|
642 | raise | |
643 | self.ui.warn(_("waiting for lock on %s held by %r\n") % |
|
643 | self.ui.warn(_("waiting for lock on %s held by %r\n") % | |
644 | (desc, inst.locker)) |
|
644 | (desc, inst.locker)) | |
645 | # default to 600 seconds timeout |
|
645 | # default to 600 seconds timeout | |
646 | l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")), |
|
646 | l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")), | |
647 | releasefn, desc=desc) |
|
647 | releasefn, desc=desc) | |
648 | if acquirefn: |
|
648 | if acquirefn: | |
649 | acquirefn() |
|
649 | acquirefn() | |
650 | return l |
|
650 | return l | |
651 |
|
651 | |||
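_lock() above first tries a non-blocking acquire (timeout 0); if the lock is held and the caller asked to wait, it reports who holds it and retries with the ui.timeout setting (600 seconds by default), then runs acquirefn once the lock is obtained. A stripped-down version of that try-then-wait shape using a threading.Lock as a stand-in (the real lock module uses lock files on disk, not threads):

    import threading

    class LockHeld(Exception):
        pass

    def trylock(lk, wait, timeout=600.0):
        """Non-blocking attempt first; only block (up to `timeout`) when
        the caller asked to wait -- the same shape as _lock() above."""
        if lk.acquire(False):            # timeout 0: don't block
            return lk
        if not wait:
            raise LockHeld('lock is held by someone else')
        if lk.acquire(True, timeout):    # second attempt with a deadline
            return lk
        raise LockHeld('timed out waiting for lock')

    lk = threading.Lock()
    trylock(lk, wait=True)
    assert lk.locked()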
652 | def lock(self, wait=True): |
|
652 | def lock(self, wait=True): | |
653 | if self._lockref and self._lockref(): |
|
653 | if self._lockref and self._lockref(): | |
654 | return self._lockref() |
|
654 | return self._lockref() | |
655 |
|
655 | |||
656 | l = self._lock(self.sjoin("lock"), wait, None, self.invalidate, |
|
656 | l = self._lock(self.sjoin("lock"), wait, None, self.invalidate, | |
657 | _('repository %s') % self.origroot) |
|
657 | _('repository %s') % self.origroot) | |
658 | self._lockref = weakref.ref(l) |
|
658 | self._lockref = weakref.ref(l) | |
659 | return l |
|
659 | return l | |
660 |
|
660 | |||
661 | def wlock(self, wait=True): |
|
661 | def wlock(self, wait=True): | |
662 | if self._wlockref and self._wlockref(): |
|
662 | if self._wlockref and self._wlockref(): | |
663 | return self._wlockref() |
|
663 | return self._wlockref() | |
664 |
|
664 | |||
665 | l = self._lock(self.join("wlock"), wait, self.dirstate.write, |
|
665 | l = self._lock(self.join("wlock"), wait, self.dirstate.write, | |
666 | self.dirstate.invalidate, _('working directory of %s') % |
|
666 | self.dirstate.invalidate, _('working directory of %s') % | |
667 | self.origroot) |
|
667 | self.origroot) | |
668 | self._wlockref = weakref.ref(l) |
|
668 | self._wlockref = weakref.ref(l) | |
669 | return l |
|
669 | return l | |
670 |
|
670 | |||
671 | def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist): |
|
671 | def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist): | |
672 | """ |
|
672 | """ | |
673 | commit an individual file as part of a larger transaction |
|
673 | commit an individual file as part of a larger transaction | |
674 | """ |
|
674 | """ | |
675 |
|
675 | |||
676 | fn = fctx.path() |
|
676 | fn = fctx.path() | |
677 | t = fctx.data() |
|
677 | t = fctx.data() | |
678 | fl = self.file(fn) |
|
678 | fl = self.file(fn) | |
679 | fp1 = manifest1.get(fn, nullid) |
|
679 | fp1 = manifest1.get(fn, nullid) | |
680 | fp2 = manifest2.get(fn, nullid) |
|
680 | fp2 = manifest2.get(fn, nullid) | |
681 |
|
681 | |||
682 | meta = {} |
|
682 | meta = {} | |
683 | cp = fctx.renamed() |
|
683 | cp = fctx.renamed() | |
684 | if cp and cp[0] != fn: |
|
684 | if cp and cp[0] != fn: | |
685 | # Mark the new revision of this file as a copy of another |
|
685 | # Mark the new revision of this file as a copy of another | |
686 | # file. This copy data will effectively act as a parent |
|
686 | # file. This copy data will effectively act as a parent | |
687 | # of this new revision. If this is a merge, the first |
|
687 | # of this new revision. If this is a merge, the first | |
688 | # parent will be the nullid (meaning "look up the copy data") |
|
688 | # parent will be the nullid (meaning "look up the copy data") | |
689 | # and the second one will be the other parent. For example: |
|
689 | # and the second one will be the other parent. For example: | |
690 | # |
|
690 | # | |
691 | # 0 --- 1 --- 3 rev1 changes file foo |
|
691 | # 0 --- 1 --- 3 rev1 changes file foo | |
692 | # \ / rev2 renames foo to bar and changes it |
|
692 | # \ / rev2 renames foo to bar and changes it | |
693 | # \- 2 -/ rev3 should have bar with all changes and |
|
693 | # \- 2 -/ rev3 should have bar with all changes and | |
694 | # should record that bar descends from |
|
694 | # should record that bar descends from | |
695 | # bar in rev2 and foo in rev1 |
|
695 | # bar in rev2 and foo in rev1 | |
696 | # |
|
696 | # | |
697 | # this allows this merge to succeed: |
|
697 | # this allows this merge to succeed: | |
698 | # |
|
698 | # | |
699 | # 0 --- 1 --- 3 rev4 reverts the content change from rev2 |
|
699 | # 0 --- 1 --- 3 rev4 reverts the content change from rev2 | |
700 | # \ / merging rev3 and rev4 should use bar@rev2 |
|
700 | # \ / merging rev3 and rev4 should use bar@rev2 | |
701 | # \- 2 --- 4 as the merge base |
|
701 | # \- 2 --- 4 as the merge base | |
702 | # |
|
702 | # | |
703 |
|
703 | |||
704 | cf = cp[0] |
|
704 | cf = cp[0] | |
705 | cr = manifest1.get(cf) |
|
705 | cr = manifest1.get(cf) | |
706 | nfp = fp2 |
|
706 | nfp = fp2 | |
707 |
|
707 | |||
708 | if manifest2: # branch merge |
|
708 | if manifest2: # branch merge | |
709 | if fp2 == nullid: # copied on remote side |
|
709 | if fp2 == nullid: # copied on remote side | |
710 | if fp1 != nullid or cf in manifest2: |
|
710 | if fp1 != nullid or cf in manifest2: | |
711 | cr = manifest2[cf] |
|
711 | cr = manifest2[cf] | |
712 | nfp = fp1 |
|
712 | nfp = fp1 | |
713 |
|
713 | |||
714 | # find source in nearest ancestor if we've lost track |
|
714 | # find source in nearest ancestor if we've lost track | |
715 | if not cr: |
|
715 | if not cr: | |
716 | self.ui.debug(_(" %s: searching for copy revision for %s\n") % |
|
716 | self.ui.debug(_(" %s: searching for copy revision for %s\n") % | |
717 | (fn, cf)) |
|
717 | (fn, cf)) | |
718 | for a in self['.'].ancestors(): |
|
718 | for a in self['.'].ancestors(): | |
719 | if cf in a: |
|
719 | if cf in a: | |
720 | cr = a[cf].filenode() |
|
720 | cr = a[cf].filenode() | |
721 | break |
|
721 | break | |
722 |
|
722 | |||
723 | self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr))) |
|
723 | self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr))) | |
724 | meta["copy"] = cf |
|
724 | meta["copy"] = cf | |
725 | meta["copyrev"] = hex(cr) |
|
725 | meta["copyrev"] = hex(cr) | |
726 | fp1, fp2 = nullid, nfp |
|
726 | fp1, fp2 = nullid, nfp | |
727 | elif fp2 != nullid: |
|
727 | elif fp2 != nullid: | |
728 | # is one parent an ancestor of the other? |
|
728 | # is one parent an ancestor of the other? | |
729 | fpa = fl.ancestor(fp1, fp2) |
|
729 | fpa = fl.ancestor(fp1, fp2) | |
730 | if fpa == fp1: |
|
730 | if fpa == fp1: | |
731 | fp1, fp2 = fp2, nullid |
|
731 | fp1, fp2 = fp2, nullid | |
732 | elif fpa == fp2: |
|
732 | elif fpa == fp2: | |
733 | fp2 = nullid |
|
733 | fp2 = nullid | |
734 |
|
734 | |||
735 | # is the file unmodified from the parent? report existing entry |
|
735 | # is the file unmodified from the parent? report existing entry | |
736 | if fp2 == nullid and not fl.cmp(fp1, t) and not meta: |
|
736 | if fp2 == nullid and not fl.cmp(fp1, t) and not meta: | |
737 | return fp1 |
|
737 | return fp1 | |
738 |
|
738 | |||
739 | changelist.append(fn) |
|
739 | changelist.append(fn) | |
740 | return fl.add(t, meta, tr, linkrev, fp1, fp2) |
|
740 | return fl.add(t, meta, tr, linkrev, fp1, fp2) | |
741 |
|
741 | |||
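The long comment above explains why a copied or renamed file records its source: filecommit() stores meta["copy"] (the source path) and meta["copyrev"] (the hex of the source filenode) and shifts the real parent into the second slot, leaving the first parent null as a marker meaning "look up the copy data". A tiny sketch of those final assignments; the helper name and values are invented, only the field names match the code above:

    NULLID = '0' * 40          # stand-in for node.nullid in hex form

    def recordcopy(meta, copysource, copynode_hex, nfp):
        """Mirror the three assignments at the end of the copy branch in
        filecommit(): note the source, note its filenode, and move the
        real parent into the second slot."""
        meta['copy'] = copysource
        meta['copyrev'] = copynode_hex
        return NULLID, nfp      # the new (fp1, fp2)

    meta = {}
    fp1, fp2 = recordcopy(meta, 'foo', 'a' * 40, 'b' * 40)
    assert meta == {'copy': 'foo', 'copyrev': 'a' * 40}
    assert (fp1, fp2) == (NULLID, 'b' * 40)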
742 | def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}): |
|
742 | def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}): | |
743 | if p1 is None: |
|
743 | if p1 is None: | |
744 | p1, p2 = self.dirstate.parents() |
|
744 | p1, p2 = self.dirstate.parents() | |
745 | return self.commit(files=files, text=text, user=user, date=date, |
|
745 | return self.commit(files=files, text=text, user=user, date=date, | |
746 | p1=p1, p2=p2, extra=extra, empty_ok=True) |
|
746 | p1=p1, p2=p2, extra=extra, empty_ok=True) | |
747 |
|
747 | |||
748 | def commit(self, files=None, text="", user=None, date=None, |
|
748 | def commit(self, files=None, text="", user=None, date=None, | |
749 | match=None, force=False, force_editor=False, |
|
749 | match=None, force=False, force_editor=False, | |
750 | p1=None, p2=None, extra={}, empty_ok=False): |
|
750 | p1=None, p2=None, extra={}, empty_ok=False): | |
751 | wlock = lock = None |
|
751 | wlock = lock = None | |
752 | if files: |
|
752 | if files: | |
753 | files = util.unique(files) |
|
753 | files = util.unique(files) | |
754 | try: |
|
754 | try: | |
755 | wlock = self.wlock() |
|
755 | wlock = self.wlock() | |
756 | lock = self.lock() |
|
756 | lock = self.lock() | |
757 | use_dirstate = (p1 is None) # not rawcommit |
|
757 | use_dirstate = (p1 is None) # not rawcommit | |
758 |
|
758 | |||
759 | if use_dirstate: |
|
759 | if use_dirstate: | |
760 | p1, p2 = self.dirstate.parents() |
|
760 | p1, p2 = self.dirstate.parents() | |
761 | update_dirstate = True |
|
761 | update_dirstate = True | |
762 |
|
762 | |||
763 | if (not force and p2 != nullid and |
|
763 | if (not force and p2 != nullid and | |
764 | (match and (match.files() or match.anypats()))): |
|
764 | (match and (match.files() or match.anypats()))): | |
765 | raise util.Abort(_('cannot partially commit a merge ' |
|
765 | raise util.Abort(_('cannot partially commit a merge ' | |
766 | '(do not specify files or patterns)')) |
|
766 | '(do not specify files or patterns)')) | |
767 |
|
767 | |||
768 | if files: |
|
768 | if files: | |
769 | modified, removed = [], [] |
|
769 | modified, removed = [], [] | |
770 | for f in files: |
|
770 | for f in files: | |
771 | s = self.dirstate[f] |
|
771 | s = self.dirstate[f] | |
772 | if s in 'nma': |
|
772 | if s in 'nma': | |
773 | modified.append(f) |
|
773 | modified.append(f) | |
774 | elif s == 'r': |
|
774 | elif s == 'r': | |
775 | removed.append(f) |
|
775 | removed.append(f) | |
776 | else: |
|
776 | else: | |
777 | self.ui.warn(_("%s not tracked!\n") % f) |
|
777 | self.ui.warn(_("%s not tracked!\n") % f) | |
778 | changes = [modified, [], removed, [], []] |
|
778 | changes = [modified, [], removed, [], []] | |
779 | else: |
|
779 | else: | |
780 | changes = self.status(match=match) |
|
780 | changes = self.status(match=match) | |
781 | else: |
|
781 | else: | |
782 | p1, p2 = p1, p2 or nullid |
|
782 | p1, p2 = p1, p2 or nullid | |
783 | update_dirstate = (self.dirstate.parents()[0] == p1) |
|
783 | update_dirstate = (self.dirstate.parents()[0] == p1) | |
784 | changes = [files, [], [], [], []] |
|
784 | changes = [files, [], [], [], []] | |
785 |
|
785 | |||
786 | ms = merge_.mergestate(self) |
|
786 | ms = merge_.mergestate(self) | |
787 | for f in changes[0]: |
|
787 | for f in changes[0]: | |
788 | if f in ms and ms[f] == 'u': |
|
788 | if f in ms and ms[f] == 'u': | |
789 | raise util.Abort(_("unresolved merge conflicts " |
|
789 | raise util.Abort(_("unresolved merge conflicts " | |
790 | "(see hg resolve)")) |
|
790 | "(see hg resolve)")) | |
791 | wctx = context.workingctx(self, (p1, p2), text, user, date, |
|
791 | wctx = context.workingctx(self, (p1, p2), text, user, date, | |
792 | extra, changes) |
|
792 | extra, changes) | |
793 | return self._commitctx(wctx, force, force_editor, empty_ok, |
|
793 | return self._commitctx(wctx, force, force_editor, empty_ok, | |
794 | use_dirstate, update_dirstate) |
|
794 | use_dirstate, update_dirstate) | |
795 | finally: |
|
795 | finally: | |
796 | del lock, wlock |
|
796 | del lock, wlock | |
797 |
|
797 | |||
798 | def commitctx(self, ctx): |
|
798 | def commitctx(self, ctx): | |
799 | """Add a new revision to current repository. |
|
799 | """Add a new revision to current repository. | |
800 |
|
800 | |||
801 | Revision information is passed in the context.memctx argument. |
|
801 | Revision information is passed in the context.memctx argument. | |
802 | commitctx() does not touch the working directory. |
|
802 | commitctx() does not touch the working directory. | |
803 | """ |
|
803 | """ | |
804 | wlock = lock = None |
|
804 | wlock = lock = None | |
805 | try: |
|
805 | try: | |
806 | wlock = self.wlock() |
|
806 | wlock = self.wlock() | |
807 | lock = self.lock() |
|
807 | lock = self.lock() | |
808 | return self._commitctx(ctx, force=True, force_editor=False, |
|
808 | return self._commitctx(ctx, force=True, force_editor=False, | |
809 | empty_ok=True, use_dirstate=False, |
|
809 | empty_ok=True, use_dirstate=False, | |
810 | update_dirstate=False) |
|
810 | update_dirstate=False) | |
811 | finally: |
|
811 | finally: | |
812 | del lock, wlock |
|
812 | del lock, wlock | |
813 |
|
813 | |||
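A note for readers of commitctx(): the attribute accesses in _commitctx() below spell out the interface the passed-in context object has to provide. The stub here is only an editorial sketch of that interface, compiled from the calls visible in this file; the class name and the grouping into one stub are not Mercurial API.

    # interface sketch derived from the calls made in _commitctx() below
    class ctx_interface(object):
        def parents(self): pass        # two parent changectx objects (.node() is used)
        def modified(self): pass       # lists of file names
        def added(self): pass
        def removed(self): pass
        def extra(self): pass          # dict; must carry a 'branch' entry
        def user(self): pass
        def date(self): pass
        def description(self): pass    # the commit message
        def filectx(self, path): pass  # per-file context handed to filecommit()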
814 | def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False, |
|
814 | def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False, | |
815 | use_dirstate=True, update_dirstate=True): |
|
815 | use_dirstate=True, update_dirstate=True): | |
816 | tr = None |
|
816 | tr = None | |
817 | valid = 0 # don't save the dirstate if this isn't set |
|
817 | valid = 0 # don't save the dirstate if this isn't set | |
818 | try: |
|
818 | try: | |
819 | commit = util.sort(wctx.modified() + wctx.added()) |
|
819 | commit = util.sort(wctx.modified() + wctx.added()) | |
820 | remove = wctx.removed() |
|
820 | remove = wctx.removed() | |
821 | extra = wctx.extra().copy() |
|
821 | extra = wctx.extra().copy() | |
822 | branchname = extra['branch'] |
|
822 | branchname = extra['branch'] | |
823 | user = wctx.user() |
|
823 | user = wctx.user() | |
824 | text = wctx.description() |
|
824 | text = wctx.description() | |
825 |
|
825 | |||
826 | p1, p2 = [p.node() for p in wctx.parents()] |
|
826 | p1, p2 = [p.node() for p in wctx.parents()] | |
827 | c1 = self.changelog.read(p1) |
|
827 | c1 = self.changelog.read(p1) | |
828 | c2 = self.changelog.read(p2) |
|
828 | c2 = self.changelog.read(p2) | |
829 | m1 = self.manifest.read(c1[0]).copy() |
|
829 | m1 = self.manifest.read(c1[0]).copy() | |
830 | m2 = self.manifest.read(c2[0]) |
|
830 | m2 = self.manifest.read(c2[0]) | |
831 |
|
831 | |||
832 | if use_dirstate: |
|
832 | if use_dirstate: | |
833 | oldname = c1[5].get("branch") # stored in UTF-8 |
|
833 | oldname = c1[5].get("branch") # stored in UTF-8 | |
834 | if (not commit and not remove and not force and p2 == nullid |
|
834 | if (not commit and not remove and not force and p2 == nullid | |
835 | and branchname == oldname): |
|
835 | and branchname == oldname): | |
836 | self.ui.status(_("nothing changed\n")) |
|
836 | self.ui.status(_("nothing changed\n")) | |
837 | return None |
|
837 | return None | |
838 |
|
838 | |||
839 | xp1 = hex(p1) |
|
839 | xp1 = hex(p1) | |
840 | if p2 == nullid: xp2 = '' |
|
840 | if p2 == nullid: xp2 = '' | |
841 | else: xp2 = hex(p2) |
|
841 | else: xp2 = hex(p2) | |
842 |
|
842 | |||
843 | self.hook("precommit", throw=True, parent1=xp1, parent2=xp2) |
|
843 | self.hook("precommit", throw=True, parent1=xp1, parent2=xp2) | |
844 |
|
844 | |||
845 | tr = self.transaction() |
|
845 | tr = self.transaction() | |
846 | trp = weakref.proxy(tr) |
|
846 | trp = weakref.proxy(tr) | |
847 |
|
847 | |||
848 | # check in files |
|
848 | # check in files | |
849 | new = {} |
|
849 | new = {} | |
850 | changed = [] |
|
850 | changed = [] | |
851 | linkrev = len(self) |
|
851 | linkrev = len(self) | |
852 | for f in commit: |
|
852 | for f in commit: | |
853 | self.ui.note(f + "\n") |
|
853 | self.ui.note(f + "\n") | |
854 | try: |
|
854 | try: | |
855 | fctx = wctx.filectx(f) |
|
855 | fctx = wctx.filectx(f) | |
856 | newflags = fctx.flags() |
|
856 | newflags = fctx.flags() | |
857 | new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed) |
|
857 | new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed) | |
858 | if ((not changed or changed[-1] != f) and |
|
858 | if ((not changed or changed[-1] != f) and | |
859 | m2.get(f) != new[f]): |
|
859 | m2.get(f) != new[f]): | |
860 | # mention the file in the changelog if some |
|
860 | # mention the file in the changelog if some | |
861 | # flag changed, even if there was no content |
|
861 | # flag changed, even if there was no content | |
862 | # change. |
|
862 | # change. | |
863 | if m1.flags(f) != newflags: |
|
863 | if m1.flags(f) != newflags: | |
864 | changed.append(f) |
|
864 | changed.append(f) | |
865 | m1.set(f, newflags) |
|
865 | m1.set(f, newflags) | |
866 | if use_dirstate: |
|
866 | if use_dirstate: | |
867 | self.dirstate.normal(f) |
|
867 | self.dirstate.normal(f) | |
868 |
|
868 | |||
869 | except (OSError, IOError): |
|
869 | except (OSError, IOError): | |
870 | if use_dirstate: |
|
870 | if use_dirstate: | |
871 | self.ui.warn(_("trouble committing %s!\n") % f) |
|
871 | self.ui.warn(_("trouble committing %s!\n") % f) | |
872 | raise |
|
872 | raise | |
873 | else: |
|
873 | else: | |
874 | remove.append(f) |
|
874 | remove.append(f) | |
875 |
|
875 | |||
876 | updated, added = [], [] |
|
876 | updated, added = [], [] | |
877 | for f in util.sort(changed): |
|
877 | for f in util.sort(changed): | |
878 | if f in m1 or f in m2: |
|
878 | if f in m1 or f in m2: | |
879 | updated.append(f) |
|
879 | updated.append(f) | |
880 | else: |
|
880 | else: | |
881 | added.append(f) |
|
881 | added.append(f) | |
882 |
|
882 | |||
883 | # update manifest |
|
883 | # update manifest | |
884 | m1.update(new) |
|
884 | m1.update(new) | |
885 | removed = [] |
|
885 | removed = [] | |
886 |
|
886 | |||
887 | for f in util.sort(remove): |
|
887 | for f in util.sort(remove): | |
888 | if f in m1: |
|
888 | if f in m1: | |
889 | del m1[f] |
|
889 | del m1[f] | |
890 | removed.append(f) |
|
890 | removed.append(f) | |
891 | elif f in m2: |
|
891 | elif f in m2: | |
892 | removed.append(f) |
|
892 | removed.append(f) | |
893 | mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0], |
|
893 | mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0], | |
894 | (new, removed)) |
|
894 | (new, removed)) | |
895 |
|
895 | |||
896 | # add changeset |
|
896 | # add changeset | |
897 | if (not empty_ok and not text) or force_editor: |
|
897 | if (not empty_ok and not text) or force_editor: | |
898 | edittext = [] |
|
898 | edittext = [] | |
899 | if text: |
|
899 | if text: | |
900 | edittext.append(text) |
|
900 | edittext.append(text) | |
901 | edittext.append("") |
|
901 | edittext.append("") | |
902 | edittext.append("") # Empty line between message and comments. |
|
902 | edittext.append("") # Empty line between message and comments. | |
903 | edittext.append(_("HG: Enter commit message." |
|
903 | edittext.append(_("HG: Enter commit message." | |
904 | " Lines beginning with 'HG:' are removed.")) |
|
904 | " Lines beginning with 'HG:' are removed.")) | |
905 | edittext.append("HG: --") |
|
905 | edittext.append("HG: --") | |
906 | edittext.append("HG: user: %s" % user) |
|
906 | edittext.append("HG: user: %s" % user) | |
907 | if p2 != nullid: |
|
907 | if p2 != nullid: | |
908 | edittext.append("HG: branch merge") |
|
908 | edittext.append("HG: branch merge") | |
909 | if branchname: |
|
909 | if branchname: | |
910 | edittext.append("HG: branch '%s'" % util.tolocal(branchname)) |
|
910 | edittext.append("HG: branch '%s'" % util.tolocal(branchname)) | |
911 | edittext.extend(["HG: added %s" % f for f in added]) |
|
911 | edittext.extend(["HG: added %s" % f for f in added]) | |
912 | edittext.extend(["HG: changed %s" % f for f in updated]) |
|
912 | edittext.extend(["HG: changed %s" % f for f in updated]) | |
913 | edittext.extend(["HG: removed %s" % f for f in removed]) |
|
913 | edittext.extend(["HG: removed %s" % f for f in removed]) | |
914 | if not added and not updated and not removed: |
|
914 | if not added and not updated and not removed: | |
915 | edittext.append("HG: no files changed") |
|
915 | edittext.append("HG: no files changed") | |
916 | edittext.append("") |
|
916 | edittext.append("") | |
917 | # run editor in the repository root |
|
917 | # run editor in the repository root | |
918 | olddir = os.getcwd() |
|
918 | olddir = os.getcwd() | |
919 | os.chdir(self.root) |
|
919 | os.chdir(self.root) | |
920 | text = self.ui.edit("\n".join(edittext), user) |
|
920 | text = self.ui.edit("\n".join(edittext), user) | |
921 | os.chdir(olddir) |
|
921 | os.chdir(olddir) | |
922 |
|
922 | |||
923 | lines = [line.rstrip() for line in text.rstrip().splitlines()] |
|
923 | lines = [line.rstrip() for line in text.rstrip().splitlines()] | |
924 | while lines and not lines[0]: |
|
924 | while lines and not lines[0]: | |
925 | del lines[0] |
|
925 | del lines[0] | |
926 | if not lines and use_dirstate: |
|
926 | if not lines and use_dirstate: | |
927 | raise util.Abort(_("empty commit message")) |
|
927 | raise util.Abort(_("empty commit message")) | |
928 | text = '\n'.join(lines) |
|
928 | text = '\n'.join(lines) | |
929 |
|
929 | |||
930 | n = self.changelog.add(mn, changed + removed, text, trp, p1, p2, |
|
930 | n = self.changelog.add(mn, changed + removed, text, trp, p1, p2, | |
931 | user, wctx.date(), extra) |
|
931 | user, wctx.date(), extra) | |
932 | self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, |
|
932 | self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, | |
933 | parent2=xp2) |
|
933 | parent2=xp2) | |
934 | tr.close() |
|
934 | tr.close() | |
935 |
|
935 | |||
936 | if self.branchcache: |
|
936 | if self.branchcache: | |
937 | self.branchtags() |
|
937 | self.branchtags() | |
938 |
|
938 | |||
939 | if use_dirstate or update_dirstate: |
|
939 | if use_dirstate or update_dirstate: | |
940 | self.dirstate.setparents(n) |
|
940 | self.dirstate.setparents(n) | |
941 | if use_dirstate: |
|
941 | if use_dirstate: | |
942 | for f in removed: |
|
942 | for f in removed: | |
943 | self.dirstate.forget(f) |
|
943 | self.dirstate.forget(f) | |
944 | valid = 1 # our dirstate updates are complete |
|
944 | valid = 1 # our dirstate updates are complete | |
945 |
|
945 | |||
946 | self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2) |
|
946 | self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2) | |
947 | return n |
|
947 | return n | |
948 | finally: |
|
948 | finally: | |
949 | if not valid: # don't save our updated dirstate |
|
949 | if not valid: # don't save our updated dirstate | |
950 | self.dirstate.invalidate() |
|
950 | self.dirstate.invalidate() | |
951 | del tr |
|
951 | del tr | |
952 |
|
952 | |||
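The commit path above fires three hooks in a fixed order: precommit before anything is written, pretxncommit after the changelog entry exists but before the transaction is closed (so a failing hook rolls the commit back), and commit once everything is final. Below is a hedged sketch of an in-process pretxncommit hook; the node/parent1/parent2 keyword names come from the self.hook() calls above, while the hook signature, the [hooks] wiring and the failure-by-truthy-return convention are standard Mercurial behaviour assumed here rather than shown in this file.

    # editorial sketch of an in-process hook; enable it with something like
    #   [hooks]
    #   pretxncommit.checkmsg = python:myhooks.checkmsg
    # (the module and hook names are made up)
    def checkmsg(ui, repo, node=None, parent1=None, parent2=None, **kwargs):
        """Refuse commits whose message is shorter than ten characters."""
        if len(repo[node].description().strip()) < 10:
            ui.warn("commit message too short\n")
            return True      # a truthy return makes a pretxn* hook fail and roll back
        return False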
953 | def walk(self, match, node=None): |
|
953 | def walk(self, match, node=None): | |
954 | ''' |
|
954 | ''' | |
955 | walk recursively through the directory tree or a given |
|
955 | walk recursively through the directory tree or a given | |
956 | changeset, finding all files matched by the match |
|
956 | changeset, finding all files matched by the match | |
957 | function |
|
957 | function | |
958 | ''' |
|
958 | ''' | |
959 | return self[node].walk(match) |
|
959 | return self[node].walk(match) | |
960 |
|
960 | |||
961 | def status(self, node1='.', node2=None, match=None, |
|
961 | def status(self, node1='.', node2=None, match=None, | |
962 | ignored=False, clean=False, unknown=False): |
|
962 | ignored=False, clean=False, unknown=False): | |
963 | """return status of files between two nodes or node and working directory |
|
963 | """return status of files between two nodes or node and working directory | |
964 |
|
964 | |||
965 | If node1 is None, use the first dirstate parent instead. |
|
965 | If node1 is None, use the first dirstate parent instead. | |
966 | If node2 is None, compare node1 with working directory. |
|
966 | If node2 is None, compare node1 with working directory. | |
967 | """ |
|
967 | """ | |
968 |
|
968 | |||
969 | def mfmatches(ctx): |
|
969 | def mfmatches(ctx): | |
970 | mf = ctx.manifest().copy() |
|
970 | mf = ctx.manifest().copy() | |
971 | for fn in mf.keys(): |
|
971 | for fn in mf.keys(): | |
972 | if not match(fn): |
|
972 | if not match(fn): | |
973 | del mf[fn] |
|
973 | del mf[fn] | |
974 | return mf |
|
974 | return mf | |
975 |
|
975 | |||
976 | if isinstance(node1, context.changectx): |
|
976 | if isinstance(node1, context.changectx): | |
977 | ctx1 = node1 |
|
977 | ctx1 = node1 | |
978 | else: |
|
978 | else: | |
979 | ctx1 = self[node1] |
|
979 | ctx1 = self[node1] | |
980 | if isinstance(node2, context.changectx): |
|
980 | if isinstance(node2, context.changectx): | |
981 | ctx2 = node2 |
|
981 | ctx2 = node2 | |
982 | else: |
|
982 | else: | |
983 | ctx2 = self[node2] |
|
983 | ctx2 = self[node2] | |
984 |
|
984 | |||
985 | working = ctx2 |
|
985 | working = ctx2.rev() is None | |
986 | parentworking = working and ctx1 == self['.'] |
|
986 | parentworking = working and ctx1 == self['.'] | |
987 | match = match or match_.always(self.root, self.getcwd()) |
|
987 | match = match or match_.always(self.root, self.getcwd()) | |
988 | listignored, listclean, listunknown = ignored, clean, unknown |
|
988 | listignored, listclean, listunknown = ignored, clean, unknown | |
989 |
|
989 | |||
990 | # load earliest manifest first for caching reasons |
|
990 | # load earliest manifest first for caching reasons | |
991 | if not working and ctx2.rev() < ctx1.rev(): |
|
991 | if not working and ctx2.rev() < ctx1.rev(): | |
992 | ctx2.manifest() |
|
992 | ctx2.manifest() | |
993 |
|
993 | |||
994 | if not parentworking: |
|
994 | if not parentworking: | |
995 | def bad(f, msg): |
|
995 | def bad(f, msg): | |
996 | if f not in ctx1: |
|
996 | if f not in ctx1: | |
997 | self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg)) |
|
997 | self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg)) | |
998 | return False |
|
998 | return False | |
999 | match.bad = bad |
|
999 | match.bad = bad | |
1000 |
|
1000 | |||
1001 | if working: # we need to scan the working dir |
|
1001 | if working: # we need to scan the working dir | |
1002 | s = self.dirstate.status(match, listignored, listclean, listunknown) |
|
1002 | s = self.dirstate.status(match, listignored, listclean, listunknown) | |
1003 | cmp, modified, added, removed, deleted, unknown, ignored, clean = s |
|
1003 | cmp, modified, added, removed, deleted, unknown, ignored, clean = s | |
1004 |
|
1004 | |||
1005 | # check for any possibly clean files |
|
1005 | # check for any possibly clean files | |
1006 | if parentworking and cmp: |
|
1006 | if parentworking and cmp: | |
1007 | fixup = [] |
|
1007 | fixup = [] | |
1008 | # do a full compare of any files that might have changed |
|
1008 | # do a full compare of any files that might have changed | |
1009 | for f in cmp: |
|
1009 | for f in cmp: | |
1010 | if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f) |
|
1010 | if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f) | |
1011 | or ctx1[f].cmp(ctx2[f].data())): |
|
1011 | or ctx1[f].cmp(ctx2[f].data())): | |
1012 | modified.append(f) |
|
1012 | modified.append(f) | |
1013 | else: |
|
1013 | else: | |
1014 | fixup.append(f) |
|
1014 | fixup.append(f) | |
1015 |
|
1015 | |||
1016 | if listclean: |
|
1016 | if listclean: | |
1017 | clean += fixup |
|
1017 | clean += fixup | |
1018 |
|
1018 | |||
1019 | # update dirstate for files that are actually clean |
|
1019 | # update dirstate for files that are actually clean | |
1020 | if fixup: |
|
1020 | if fixup: | |
1021 | wlock = None |
|
1021 | wlock = None | |
1022 | try: |
|
1022 | try: | |
1023 | try: |
|
1023 | try: | |
1024 | wlock = self.wlock(False) |
|
1024 | wlock = self.wlock(False) | |
1025 | for f in fixup: |
|
1025 | for f in fixup: | |
1026 | self.dirstate.normal(f) |
|
1026 | self.dirstate.normal(f) | |
1027 | except lock.LockException: |
|
1027 | except lock.LockException: | |
1028 | pass |
|
1028 | pass | |
1029 | finally: |
|
1029 | finally: | |
1030 | del wlock |
|
1030 | del wlock | |
1031 |
|
1031 | |||
1032 | if not parentworking: |
|
1032 | if not parentworking: | |
1033 | mf1 = mfmatches(ctx1) |
|
1033 | mf1 = mfmatches(ctx1) | |
1034 | if working: |
|
1034 | if working: | |
1035 | # we are comparing working dir against non-parent |
|
1035 | # we are comparing working dir against non-parent | |
1036 | # generate a pseudo-manifest for the working dir |
|
1036 | # generate a pseudo-manifest for the working dir | |
1037 | mf2 = mfmatches(self['.']) |
|
1037 | mf2 = mfmatches(self['.']) | |
1038 | for f in cmp + modified + added: |
|
1038 | for f in cmp + modified + added: | |
1039 | mf2[f] = None |
|
1039 | mf2[f] = None | |
1040 | mf2.set(f, ctx2.flags(f)) |
|
1040 | mf2.set(f, ctx2.flags(f)) | |
1041 | for f in removed: |
|
1041 | for f in removed: | |
1042 | if f in mf2: |
|
1042 | if f in mf2: | |
1043 | del mf2[f] |
|
1043 | del mf2[f] | |
1044 | else: |
|
1044 | else: | |
1045 | # we are comparing two revisions |
|
1045 | # we are comparing two revisions | |
1046 | deleted, unknown, ignored = [], [], [] |
|
1046 | deleted, unknown, ignored = [], [], [] | |
1047 | mf2 = mfmatches(ctx2) |
|
1047 | mf2 = mfmatches(ctx2) | |
1048 |
|
1048 | |||
1049 | modified, added, clean = [], [], [] |
|
1049 | modified, added, clean = [], [], [] | |
1050 | for fn in mf2: |
|
1050 | for fn in mf2: | |
1051 | if fn in mf1: |
|
1051 | if fn in mf1: | |
1052 | if (mf1.flags(fn) != mf2.flags(fn) or |
|
1052 | if (mf1.flags(fn) != mf2.flags(fn) or | |
1053 | (mf1[fn] != mf2[fn] and |
|
1053 | (mf1[fn] != mf2[fn] and | |
1054 | (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))): |
|
1054 | (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))): | |
1055 | modified.append(fn) |
|
1055 | modified.append(fn) | |
1056 | elif listclean: |
|
1056 | elif listclean: | |
1057 | clean.append(fn) |
|
1057 | clean.append(fn) | |
1058 | del mf1[fn] |
|
1058 | del mf1[fn] | |
1059 | else: |
|
1059 | else: | |
1060 | added.append(fn) |
|
1060 | added.append(fn) | |
1061 | removed = mf1.keys() |
|
1061 | removed = mf1.keys() | |
1062 |
|
1062 | |||
1063 | r = modified, added, removed, deleted, unknown, ignored, clean |
|
1063 | r = modified, added, removed, deleted, unknown, ignored, clean | |
1064 | [l.sort() for l in r] |
|
1064 | [l.sort() for l in r] | |
1065 | return r |
|
1065 | return r | |
1066 |
|
1066 | |||
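From a caller's point of view, status() hands back seven lists in the fixed order assembled in the r = ... tuple above. A minimal usage sketch, assuming a repository at the hypothetical path /tmp/demo:

    from mercurial import hg, ui

    # hypothetical repository path; any existing local repository would do
    repo = hg.repository(ui.ui(), '/tmp/demo')
    modified, added, removed, deleted, unknown, ignored, clean = repo.status(
        node1='.', node2=None,          # working directory against its first parent
        ignored=False, clean=False, unknown=False)
    for f in modified:
        print 'M %s' % f
    for f in added:
        print 'A %s' % f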
1067 | def add(self, list): |
|
1067 | def add(self, list): | |
1068 | wlock = self.wlock() |
|
1068 | wlock = self.wlock() | |
1069 | try: |
|
1069 | try: | |
1070 | rejected = [] |
|
1070 | rejected = [] | |
1071 | for f in list: |
|
1071 | for f in list: | |
1072 | p = self.wjoin(f) |
|
1072 | p = self.wjoin(f) | |
1073 | try: |
|
1073 | try: | |
1074 | st = os.lstat(p) |
|
1074 | st = os.lstat(p) | |
1075 | except: |
|
1075 | except: | |
1076 | self.ui.warn(_("%s does not exist!\n") % f) |
|
1076 | self.ui.warn(_("%s does not exist!\n") % f) | |
1077 | rejected.append(f) |
|
1077 | rejected.append(f) | |
1078 | continue |
|
1078 | continue | |
1079 | if st.st_size > 10000000: |
|
1079 | if st.st_size > 10000000: | |
1080 | self.ui.warn(_("%s: files over 10MB may cause memory and" |
|
1080 | self.ui.warn(_("%s: files over 10MB may cause memory and" | |
1081 | " performance problems\n" |
|
1081 | " performance problems\n" | |
1082 | "(use 'hg revert %s' to unadd the file)\n") |
|
1082 | "(use 'hg revert %s' to unadd the file)\n") | |
1083 | % (f, f)) |
|
1083 | % (f, f)) | |
1084 | if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): |
|
1084 | if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): | |
1085 | self.ui.warn(_("%s not added: only files and symlinks " |
|
1085 | self.ui.warn(_("%s not added: only files and symlinks " | |
1086 | "supported currently\n") % f) |
|
1086 | "supported currently\n") % f) | |
1087 | rejected.append(p) |
|
1087 | rejected.append(p) | |
1088 | elif self.dirstate[f] in 'amn': |
|
1088 | elif self.dirstate[f] in 'amn': | |
1089 | self.ui.warn(_("%s already tracked!\n") % f) |
|
1089 | self.ui.warn(_("%s already tracked!\n") % f) | |
1090 | elif self.dirstate[f] == 'r': |
|
1090 | elif self.dirstate[f] == 'r': | |
1091 | self.dirstate.normallookup(f) |
|
1091 | self.dirstate.normallookup(f) | |
1092 | else: |
|
1092 | else: | |
1093 | self.dirstate.add(f) |
|
1093 | self.dirstate.add(f) | |
1094 | return rejected |
|
1094 | return rejected | |
1095 | finally: |
|
1095 | finally: | |
1096 | del wlock |
|
1096 | del wlock | |
1097 |
|
1097 | |||
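add() above, and the forget/remove/undelete/copy methods that follow, all branch on single-letter dirstate states ('nma', 'r', '?r', 'amn'). For orientation, the standard meanings of those letters:

    # standard dirstate state letters, listed here only for orientation
    DIRSTATE_STATES = {
        'n': 'normal (tracked, assumed clean)',
        'a': 'added',
        'r': 'removed',
        'm': 'merged (file touched by a 3-way merge)',
        '?': 'not tracked',
    }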
1098 | def forget(self, list): |
|
1098 | def forget(self, list): | |
1099 | wlock = self.wlock() |
|
1099 | wlock = self.wlock() | |
1100 | try: |
|
1100 | try: | |
1101 | for f in list: |
|
1101 | for f in list: | |
1102 | if self.dirstate[f] != 'a': |
|
1102 | if self.dirstate[f] != 'a': | |
1103 | self.ui.warn(_("%s not added!\n") % f) |
|
1103 | self.ui.warn(_("%s not added!\n") % f) | |
1104 | else: |
|
1104 | else: | |
1105 | self.dirstate.forget(f) |
|
1105 | self.dirstate.forget(f) | |
1106 | finally: |
|
1106 | finally: | |
1107 | del wlock |
|
1107 | del wlock | |
1108 |
|
1108 | |||
1109 | def remove(self, list, unlink=False): |
|
1109 | def remove(self, list, unlink=False): | |
1110 | wlock = None |
|
1110 | wlock = None | |
1111 | try: |
|
1111 | try: | |
1112 | if unlink: |
|
1112 | if unlink: | |
1113 | for f in list: |
|
1113 | for f in list: | |
1114 | try: |
|
1114 | try: | |
1115 | util.unlink(self.wjoin(f)) |
|
1115 | util.unlink(self.wjoin(f)) | |
1116 | except OSError, inst: |
|
1116 | except OSError, inst: | |
1117 | if inst.errno != errno.ENOENT: |
|
1117 | if inst.errno != errno.ENOENT: | |
1118 | raise |
|
1118 | raise | |
1119 | wlock = self.wlock() |
|
1119 | wlock = self.wlock() | |
1120 | for f in list: |
|
1120 | for f in list: | |
1121 | if unlink and os.path.exists(self.wjoin(f)): |
|
1121 | if unlink and os.path.exists(self.wjoin(f)): | |
1122 | self.ui.warn(_("%s still exists!\n") % f) |
|
1122 | self.ui.warn(_("%s still exists!\n") % f) | |
1123 | elif self.dirstate[f] == 'a': |
|
1123 | elif self.dirstate[f] == 'a': | |
1124 | self.dirstate.forget(f) |
|
1124 | self.dirstate.forget(f) | |
1125 | elif f not in self.dirstate: |
|
1125 | elif f not in self.dirstate: | |
1126 | self.ui.warn(_("%s not tracked!\n") % f) |
|
1126 | self.ui.warn(_("%s not tracked!\n") % f) | |
1127 | else: |
|
1127 | else: | |
1128 | self.dirstate.remove(f) |
|
1128 | self.dirstate.remove(f) | |
1129 | finally: |
|
1129 | finally: | |
1130 | del wlock |
|
1130 | del wlock | |
1131 |
|
1131 | |||
1132 | def undelete(self, list): |
|
1132 | def undelete(self, list): | |
1133 | wlock = None |
|
1133 | wlock = None | |
1134 | try: |
|
1134 | try: | |
1135 | manifests = [self.manifest.read(self.changelog.read(p)[0]) |
|
1135 | manifests = [self.manifest.read(self.changelog.read(p)[0]) | |
1136 | for p in self.dirstate.parents() if p != nullid] |
|
1136 | for p in self.dirstate.parents() if p != nullid] | |
1137 | wlock = self.wlock() |
|
1137 | wlock = self.wlock() | |
1138 | for f in list: |
|
1138 | for f in list: | |
1139 | if self.dirstate[f] != 'r': |
|
1139 | if self.dirstate[f] != 'r': | |
1140 | self.ui.warn(_("%s not removed!\n") % f) |
|
1140 | self.ui.warn(_("%s not removed!\n") % f) | |
1141 | else: |
|
1141 | else: | |
1142 | m = f in manifests[0] and manifests[0] or manifests[1] |
|
1142 | m = f in manifests[0] and manifests[0] or manifests[1] | |
1143 | t = self.file(f).read(m[f]) |
|
1143 | t = self.file(f).read(m[f]) | |
1144 | self.wwrite(f, t, m.flags(f)) |
|
1144 | self.wwrite(f, t, m.flags(f)) | |
1145 | self.dirstate.normal(f) |
|
1145 | self.dirstate.normal(f) | |
1146 | finally: |
|
1146 | finally: | |
1147 | del wlock |
|
1147 | del wlock | |
1148 |
|
1148 | |||
1149 | def copy(self, source, dest): |
|
1149 | def copy(self, source, dest): | |
1150 | wlock = None |
|
1150 | wlock = None | |
1151 | try: |
|
1151 | try: | |
1152 | p = self.wjoin(dest) |
|
1152 | p = self.wjoin(dest) | |
1153 | if not (os.path.exists(p) or os.path.islink(p)): |
|
1153 | if not (os.path.exists(p) or os.path.islink(p)): | |
1154 | self.ui.warn(_("%s does not exist!\n") % dest) |
|
1154 | self.ui.warn(_("%s does not exist!\n") % dest) | |
1155 | elif not (os.path.isfile(p) or os.path.islink(p)): |
|
1155 | elif not (os.path.isfile(p) or os.path.islink(p)): | |
1156 | self.ui.warn(_("copy failed: %s is not a file or a " |
|
1156 | self.ui.warn(_("copy failed: %s is not a file or a " | |
1157 | "symbolic link\n") % dest) |
|
1157 | "symbolic link\n") % dest) | |
1158 | else: |
|
1158 | else: | |
1159 | wlock = self.wlock() |
|
1159 | wlock = self.wlock() | |
1160 | if self.dirstate[dest] in '?r': |
|
1160 | if self.dirstate[dest] in '?r': | |
1161 | self.dirstate.add(dest) |
|
1161 | self.dirstate.add(dest) | |
1162 | self.dirstate.copy(source, dest) |
|
1162 | self.dirstate.copy(source, dest) | |
1163 | finally: |
|
1163 | finally: | |
1164 | del wlock |
|
1164 | del wlock | |
1165 |
|
1165 | |||
1166 | def heads(self, start=None): |
|
1166 | def heads(self, start=None): | |
1167 | heads = self.changelog.heads(start) |
|
1167 | heads = self.changelog.heads(start) | |
1168 | # sort the output in rev descending order |
|
1168 | # sort the output in rev descending order | |
1169 | heads = [(-self.changelog.rev(h), h) for h in heads] |
|
1169 | heads = [(-self.changelog.rev(h), h) for h in heads] | |
1170 | return [n for (r, n) in util.sort(heads)] |
|
1170 | return [n for (r, n) in util.sort(heads)] | |
1171 |
|
1171 | |||
1172 | def branchheads(self, branch=None, start=None): |
|
1172 | def branchheads(self, branch=None, start=None): | |
1173 | if branch is None: |
|
1173 | if branch is None: | |
1174 | branch = self[None].branch() |
|
1174 | branch = self[None].branch() | |
1175 | branches = self.branchtags() |
|
1175 | branches = self.branchtags() | |
1176 | if branch not in branches: |
|
1176 | if branch not in branches: | |
1177 | return [] |
|
1177 | return [] | |
1178 | # The basic algorithm is this: |
|
1178 | # The basic algorithm is this: | |
1179 | # |
|
1179 | # | |
1180 | # Start from the branch tip since there are no later revisions that can |
|
1180 | # Start from the branch tip since there are no later revisions that can | |
1181 | # possibly be in this branch, and the tip is a guaranteed head. |
|
1181 | # possibly be in this branch, and the tip is a guaranteed head. | |
1182 | # |
|
1182 | # | |
1183 | # Remember the tip's parents as the first ancestors, since these by |
|
1183 | # Remember the tip's parents as the first ancestors, since these by | |
1184 | # definition are not heads. |
|
1184 | # definition are not heads. | |
1185 | # |
|
1185 | # | |
1186 | # Step backwards from the branch tip through all the revisions. We are |
|
1186 | # Step backwards from the branch tip through all the revisions. We are | |
1187 | # guaranteed by the rules of Mercurial that we will now be visiting the |
|
1187 | # guaranteed by the rules of Mercurial that we will now be visiting the | |
1188 | # nodes in reverse topological order (children before parents). |
|
1188 | # nodes in reverse topological order (children before parents). | |
1189 | # |
|
1189 | # | |
1190 | # If a revision is one of the ancestors of a head then we can toss it |
|
1190 | # If a revision is one of the ancestors of a head then we can toss it | |
1191 | # out of the ancestors set (we've already found it and won't be |
|
1191 | # out of the ancestors set (we've already found it and won't be | |
1192 | # visiting it again) and put its parents in the ancestors set. |
|
1192 | # visiting it again) and put its parents in the ancestors set. | |
1193 | # |
|
1193 | # | |
1194 | # Otherwise, if a revision is in the branch it's another head, since it |
|
1194 | # Otherwise, if a revision is in the branch it's another head, since it | |
1195 | # wasn't in the ancestor list of an existing head. So add it to the |
|
1195 | # wasn't in the ancestor list of an existing head. So add it to the | |
1196 | # head list, and add its parents to the ancestor list. |
|
1196 | # head list, and add its parents to the ancestor list. | |
1197 | # |
|
1197 | # | |
1198 | # If it is not in the branch ignore it. |
|
1198 | # If it is not in the branch ignore it. | |
1199 | # |
|
1199 | # | |
1200 | # Once we have a list of heads, use nodesbetween to filter out all the |
|
1200 | # Once we have a list of heads, use nodesbetween to filter out all the | |
1201 | # heads that cannot be reached from startrev. There may be a more |
|
1201 | # heads that cannot be reached from startrev. There may be a more | |
1202 | # efficient way to do this as part of the previous algorithm. |
|
1202 | # efficient way to do this as part of the previous algorithm. | |
1203 |
|
1203 | |||
1204 | set = util.set |
|
1204 | set = util.set | |
1205 | heads = [self.changelog.rev(branches[branch])] |
|
1205 | heads = [self.changelog.rev(branches[branch])] | |
1206 | # Don't care if ancestors contains nullrev or not. |
|
1206 | # Don't care if ancestors contains nullrev or not. | |
1207 | ancestors = set(self.changelog.parentrevs(heads[0])) |
|
1207 | ancestors = set(self.changelog.parentrevs(heads[0])) | |
1208 | for rev in xrange(heads[0] - 1, nullrev, -1): |
|
1208 | for rev in xrange(heads[0] - 1, nullrev, -1): | |
1209 | if rev in ancestors: |
|
1209 | if rev in ancestors: | |
1210 | ancestors.update(self.changelog.parentrevs(rev)) |
|
1210 | ancestors.update(self.changelog.parentrevs(rev)) | |
1211 | ancestors.remove(rev) |
|
1211 | ancestors.remove(rev) | |
1212 | elif self[rev].branch() == branch: |
|
1212 | elif self[rev].branch() == branch: | |
1213 | heads.append(rev) |
|
1213 | heads.append(rev) | |
1214 | ancestors.update(self.changelog.parentrevs(rev)) |
|
1214 | ancestors.update(self.changelog.parentrevs(rev)) | |
1215 | heads = [self.changelog.node(rev) for rev in heads] |
|
1215 | heads = [self.changelog.node(rev) for rev in heads] | |
1216 | if start is not None: |
|
1216 | if start is not None: | |
1217 | heads = self.changelog.nodesbetween([start], heads)[2] |
|
1217 | heads = self.changelog.nodesbetween([start], heads)[2] | |
1218 | return heads |
|
1218 | return heads | |
1219 |
|
1219 | |||
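The walk described in the long comment above can be tried out in isolation: step from the branch tip downwards, keep a set of known ancestors, and any revision on the branch that is not already an ancestor is another head. A self-contained toy sketch (parents and branch_of stand in for the changelog and context calls used above):

    # toy DAG: rev -> (p1, p2); -1 plays the role of nullrev
    parents   = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (3, -1)}
    branch_of = {0: 'default', 1: 'default', 2: 'default', 3: 'default', 4: 'default'}

    def toy_branchheads(tip, branch):
        heads = [tip]
        ancestors = set(parents[tip])
        for rev in xrange(tip - 1, -1, -1):
            if rev in ancestors:
                ancestors.update(parents[rev])
                ancestors.remove(rev)
            elif branch_of[rev] == branch:
                heads.append(rev)               # on the branch and not anyone's ancestor
                ancestors.update(parents[rev])
        return heads

    print toy_branchheads(4, 'default')   # -> [4, 2]: rev 3 is an ancestor of 4, rev 2 is a second head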
1220 | def branches(self, nodes): |
|
1220 | def branches(self, nodes): | |
1221 | if not nodes: |
|
1221 | if not nodes: | |
1222 | nodes = [self.changelog.tip()] |
|
1222 | nodes = [self.changelog.tip()] | |
1223 | b = [] |
|
1223 | b = [] | |
1224 | for n in nodes: |
|
1224 | for n in nodes: | |
1225 | t = n |
|
1225 | t = n | |
1226 | while 1: |
|
1226 | while 1: | |
1227 | p = self.changelog.parents(n) |
|
1227 | p = self.changelog.parents(n) | |
1228 | if p[1] != nullid or p[0] == nullid: |
|
1228 | if p[1] != nullid or p[0] == nullid: | |
1229 | b.append((t, n, p[0], p[1])) |
|
1229 | b.append((t, n, p[0], p[1])) | |
1230 | break |
|
1230 | break | |
1231 | n = p[0] |
|
1231 | n = p[0] | |
1232 | return b |
|
1232 | return b | |
1233 |
|
1233 | |||
1234 | def between(self, pairs): |
|
1234 | def between(self, pairs): | |
1235 | r = [] |
|
1235 | r = [] | |
1236 |
|
1236 | |||
1237 | for top, bottom in pairs: |
|
1237 | for top, bottom in pairs: | |
1238 | n, l, i = top, [], 0 |
|
1238 | n, l, i = top, [], 0 | |
1239 | f = 1 |
|
1239 | f = 1 | |
1240 |
|
1240 | |||
1241 | while n != bottom: |
|
1241 | while n != bottom: | |
1242 | p = self.changelog.parents(n)[0] |
|
1242 | p = self.changelog.parents(n)[0] | |
1243 | if i == f: |
|
1243 | if i == f: | |
1244 | l.append(n) |
|
1244 | l.append(n) | |
1245 | f = f * 2 |
|
1245 | f = f * 2 | |
1246 | n = p |
|
1246 | n = p | |
1247 | i += 1 |
|
1247 | i += 1 | |
1248 |
|
1248 | |||
1249 | r.append(l) |
|
1249 | r.append(l) | |
1250 |
|
1250 | |||
1251 | return r |
|
1251 | return r | |
1252 |
|
1252 | |||
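between() walks each (top, bottom) pair along first parents and reports only the nodes whose distance from top is a power of two (the i == f test with f doubling), which gives findcommonincoming() below a logarithmic sample of each linear segment to binary-search over. A standalone restatement of which offsets get reported:

    def sampled_offsets(length):
        """Offsets from 'top' that between() would report for a segment of
        the given length (a re-statement of the i == f / f *= 2 loop above)."""
        l, i, f = [], 0, 1
        while i < length:
            if i == f:
                l.append(i)
                f *= 2
            i += 1
        return l

    print sampled_offsets(20)   # -> [1, 2, 4, 8, 16]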
1253 | def findincoming(self, remote, base=None, heads=None, force=False): |
|
1253 | def findincoming(self, remote, base=None, heads=None, force=False): | |
1254 | """Return list of roots of the subsets of missing nodes from remote |
|
1254 | """Return list of roots of the subsets of missing nodes from remote | |
1255 |
|
1255 | |||
1256 | If base dict is specified, assume that these nodes and their parents |
|
1256 | If base dict is specified, assume that these nodes and their parents | |
1257 | exist on the remote side and that no child of a node of base exists |
|
1257 | exist on the remote side and that no child of a node of base exists | |
1258 | in both remote and self. |
|
1258 | in both remote and self. | |
1259 | Furthermore, base will be updated to include the nodes that exist |
|
1259 | Furthermore, base will be updated to include the nodes that exist | |
1260 | in both self and remote but none of whose children exist in both. |
|
1260 | in both self and remote but none of whose children exist in both. | |
1261 | If a list of heads is specified, return only nodes which are heads |
|
1261 | If a list of heads is specified, return only nodes which are heads | |
1262 | or ancestors of these heads. |
|
1262 | or ancestors of these heads. | |
1263 |
|
1263 | |||
1264 | All the ancestors of base are in self and in remote. |
|
1264 | All the ancestors of base are in self and in remote. | |
1265 | All the descendants of the list returned are missing in self. |
|
1265 | All the descendants of the list returned are missing in self. | |
1266 | (and so we know that the rest of the nodes are missing in remote, see |
|
1266 | (and so we know that the rest of the nodes are missing in remote, see | |
1267 | outgoing) |
|
1267 | outgoing) | |
1268 | """ |
|
1268 | """ | |
1269 | return self.findcommonincoming(remote, base, heads, force)[1] |
|
1269 | return self.findcommonincoming(remote, base, heads, force)[1] | |
1270 |
|
1270 | |||
1271 | def findcommonincoming(self, remote, base=None, heads=None, force=False): |
|
1271 | def findcommonincoming(self, remote, base=None, heads=None, force=False): | |
1272 | """Return a tuple (common, missing roots, heads) used to identify |
|
1272 | """Return a tuple (common, missing roots, heads) used to identify | |
1273 | missing nodes from remote. |
|
1273 | missing nodes from remote. | |
1274 |
|
1274 | |||
1275 | If base dict is specified, assume that these nodes and their parents |
|
1275 | If base dict is specified, assume that these nodes and their parents | |
1276 | exist on the remote side and that no child of a node of base exists |
|
1276 | exist on the remote side and that no child of a node of base exists | |
1277 | in both remote and self. |
|
1277 | in both remote and self. | |
1278 | Furthermore, base will be updated to include the nodes that exist |
|
1278 | Furthermore, base will be updated to include the nodes that exist | |
1279 | in both self and remote but none of whose children exist in both. |
|
1279 | in both self and remote but none of whose children exist in both. | |
1280 | If a list of heads is specified, return only nodes which are heads |
|
1280 | If a list of heads is specified, return only nodes which are heads | |
1281 | or ancestors of these heads. |
|
1281 | or ancestors of these heads. | |
1282 |
|
1282 | |||
1283 | All the ancestors of base are in self and in remote. |
|
1283 | All the ancestors of base are in self and in remote. | |
1284 | """ |
|
1284 | """ | |
1285 | m = self.changelog.nodemap |
|
1285 | m = self.changelog.nodemap | |
1286 | search = [] |
|
1286 | search = [] | |
1287 | fetch = {} |
|
1287 | fetch = {} | |
1288 | seen = {} |
|
1288 | seen = {} | |
1289 | seenbranch = {} |
|
1289 | seenbranch = {} | |
1290 | if base == None: |
|
1290 | if base == None: | |
1291 | base = {} |
|
1291 | base = {} | |
1292 |
|
1292 | |||
1293 | if not heads: |
|
1293 | if not heads: | |
1294 | heads = remote.heads() |
|
1294 | heads = remote.heads() | |
1295 |
|
1295 | |||
1296 | if self.changelog.tip() == nullid: |
|
1296 | if self.changelog.tip() == nullid: | |
1297 | base[nullid] = 1 |
|
1297 | base[nullid] = 1 | |
1298 | if heads != [nullid]: |
|
1298 | if heads != [nullid]: | |
1299 | return [nullid], [nullid], list(heads) |
|
1299 | return [nullid], [nullid], list(heads) | |
1300 | return [nullid], [], [] |
|
1300 | return [nullid], [], [] | |
1301 |
|
1301 | |||
1302 | # assume we're closer to the tip than the root |
|
1302 | # assume we're closer to the tip than the root | |
1303 | # and start by examining the heads |
|
1303 | # and start by examining the heads | |
1304 | self.ui.status(_("searching for changes\n")) |
|
1304 | self.ui.status(_("searching for changes\n")) | |
1305 |
|
1305 | |||
1306 | unknown = [] |
|
1306 | unknown = [] | |
1307 | for h in heads: |
|
1307 | for h in heads: | |
1308 | if h not in m: |
|
1308 | if h not in m: | |
1309 | unknown.append(h) |
|
1309 | unknown.append(h) | |
1310 | else: |
|
1310 | else: | |
1311 | base[h] = 1 |
|
1311 | base[h] = 1 | |
1312 |
|
1312 | |||
1313 | heads = unknown |
|
1313 | heads = unknown | |
1314 | if not unknown: |
|
1314 | if not unknown: | |
1315 | return base.keys(), [], [] |
|
1315 | return base.keys(), [], [] | |
1316 |
|
1316 | |||
1317 | req = dict.fromkeys(unknown) |
|
1317 | req = dict.fromkeys(unknown) | |
1318 | reqcnt = 0 |
|
1318 | reqcnt = 0 | |
1319 |
|
1319 | |||
1320 | # search through remote branches |
|
1320 | # search through remote branches | |
1321 | # a 'branch' here is a linear segment of history, with four parts: |
|
1321 | # a 'branch' here is a linear segment of history, with four parts: | |
1322 | # head, root, first parent, second parent |
|
1322 | # head, root, first parent, second parent | |
1323 | # (a branch always has two parents (or none) by definition) |
|
1323 | # (a branch always has two parents (or none) by definition) | |
1324 | unknown = remote.branches(unknown) |
|
1324 | unknown = remote.branches(unknown) | |
1325 | while unknown: |
|
1325 | while unknown: | |
1326 | r = [] |
|
1326 | r = [] | |
1327 | while unknown: |
|
1327 | while unknown: | |
1328 | n = unknown.pop(0) |
|
1328 | n = unknown.pop(0) | |
1329 | if n[0] in seen: |
|
1329 | if n[0] in seen: | |
1330 | continue |
|
1330 | continue | |
1331 |
|
1331 | |||
1332 | self.ui.debug(_("examining %s:%s\n") |
|
1332 | self.ui.debug(_("examining %s:%s\n") | |
1333 | % (short(n[0]), short(n[1]))) |
|
1333 | % (short(n[0]), short(n[1]))) | |
1334 | if n[0] == nullid: # found the end of the branch |
|
1334 | if n[0] == nullid: # found the end of the branch | |
1335 | pass |
|
1335 | pass | |
1336 | elif n in seenbranch: |
|
1336 | elif n in seenbranch: | |
1337 | self.ui.debug(_("branch already found\n")) |
|
1337 | self.ui.debug(_("branch already found\n")) | |
1338 | continue |
|
1338 | continue | |
1339 | elif n[1] and n[1] in m: # do we know the base? |
|
1339 | elif n[1] and n[1] in m: # do we know the base? | |
1340 | self.ui.debug(_("found incomplete branch %s:%s\n") |
|
1340 | self.ui.debug(_("found incomplete branch %s:%s\n") | |
1341 | % (short(n[0]), short(n[1]))) |
|
1341 | % (short(n[0]), short(n[1]))) | |
1342 | search.append(n[0:2]) # schedule branch range for scanning |
|
1342 | search.append(n[0:2]) # schedule branch range for scanning | |
1343 | seenbranch[n] = 1 |
|
1343 | seenbranch[n] = 1 | |
1344 | else: |
|
1344 | else: | |
1345 | if n[1] not in seen and n[1] not in fetch: |
|
1345 | if n[1] not in seen and n[1] not in fetch: | |
1346 | if n[2] in m and n[3] in m: |
|
1346 | if n[2] in m and n[3] in m: | |
1347 | self.ui.debug(_("found new changeset %s\n") % |
|
1347 | self.ui.debug(_("found new changeset %s\n") % | |
1348 | short(n[1])) |
|
1348 | short(n[1])) | |
1349 | fetch[n[1]] = 1 # earliest unknown |
|
1349 | fetch[n[1]] = 1 # earliest unknown | |
1350 | for p in n[2:4]: |
|
1350 | for p in n[2:4]: | |
1351 | if p in m: |
|
1351 | if p in m: | |
1352 | base[p] = 1 # latest known |
|
1352 | base[p] = 1 # latest known | |
1353 |
|
1353 | |||
1354 | for p in n[2:4]: |
|
1354 | for p in n[2:4]: | |
1355 | if p not in req and p not in m: |
|
1355 | if p not in req and p not in m: | |
1356 | r.append(p) |
|
1356 | r.append(p) | |
1357 | req[p] = 1 |
|
1357 | req[p] = 1 | |
1358 | seen[n[0]] = 1 |
|
1358 | seen[n[0]] = 1 | |
1359 |
|
1359 | |||
1360 | if r: |
|
1360 | if r: | |
1361 | reqcnt += 1 |
|
1361 | reqcnt += 1 | |
1362 | self.ui.debug(_("request %d: %s\n") % |
|
1362 | self.ui.debug(_("request %d: %s\n") % | |
1363 | (reqcnt, " ".join(map(short, r)))) |
|
1363 | (reqcnt, " ".join(map(short, r)))) | |
1364 | for p in xrange(0, len(r), 10): |
|
1364 | for p in xrange(0, len(r), 10): | |
1365 | for b in remote.branches(r[p:p+10]): |
|
1365 | for b in remote.branches(r[p:p+10]): | |
1366 | self.ui.debug(_("received %s:%s\n") % |
|
1366 | self.ui.debug(_("received %s:%s\n") % | |
1367 | (short(b[0]), short(b[1]))) |
|
1367 | (short(b[0]), short(b[1]))) | |
1368 | unknown.append(b) |
|
1368 | unknown.append(b) | |
1369 |
|
1369 | |||
1370 | # do binary search on the branches we found |
|
1370 | # do binary search on the branches we found | |
1371 | while search: |
|
1371 | while search: | |
1372 | newsearch = [] |
|
1372 | newsearch = [] | |
1373 | reqcnt += 1 |
|
1373 | reqcnt += 1 | |
1374 | for n, l in zip(search, remote.between(search)): |
|
1374 | for n, l in zip(search, remote.between(search)): | |
1375 | l.append(n[1]) |
|
1375 | l.append(n[1]) | |
1376 | p = n[0] |
|
1376 | p = n[0] | |
1377 | f = 1 |
|
1377 | f = 1 | |
1378 | for i in l: |
|
1378 | for i in l: | |
1379 | self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i))) |
|
1379 | self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i))) | |
1380 | if i in m: |
|
1380 | if i in m: | |
1381 | if f <= 2: |
|
1381 | if f <= 2: | |
1382 | self.ui.debug(_("found new branch changeset %s\n") % |
|
1382 | self.ui.debug(_("found new branch changeset %s\n") % | |
1383 | short(p)) |
|
1383 | short(p)) | |
1384 | fetch[p] = 1 |
|
1384 | fetch[p] = 1 | |
1385 | base[i] = 1 |
|
1385 | base[i] = 1 | |
1386 | else: |
|
1386 | else: | |
1387 | self.ui.debug(_("narrowed branch search to %s:%s\n") |
|
1387 | self.ui.debug(_("narrowed branch search to %s:%s\n") | |
1388 | % (short(p), short(i))) |
|
1388 | % (short(p), short(i))) | |
1389 | newsearch.append((p, i)) |
|
1389 | newsearch.append((p, i)) | |
1390 | break |
|
1390 | break | |
1391 | p, f = i, f * 2 |
|
1391 | p, f = i, f * 2 | |
1392 | search = newsearch |
|
1392 | search = newsearch | |
1393 |
|
1393 | |||
1394 | # sanity check our fetch list |
|
1394 | # sanity check our fetch list | |
1395 | for f in fetch.keys(): |
|
1395 | for f in fetch.keys(): | |
1396 | if f in m: |
|
1396 | if f in m: | |
1397 | raise repo.RepoError(_("already have changeset ") + short(f[:4])) |
|
1397 | raise repo.RepoError(_("already have changeset ") + short(f[:4])) | |
1398 |
|
1398 | |||
1399 | if base.keys() == [nullid]: |
|
1399 | if base.keys() == [nullid]: | |
1400 | if force: |
|
1400 | if force: | |
1401 | self.ui.warn(_("warning: repository is unrelated\n")) |
|
1401 | self.ui.warn(_("warning: repository is unrelated\n")) | |
1402 | else: |
|
1402 | else: | |
1403 | raise util.Abort(_("repository is unrelated")) |
|
1403 | raise util.Abort(_("repository is unrelated")) | |
1404 |
|
1404 | |||
1405 | self.ui.debug(_("found new changesets starting at ") + |
|
1405 | self.ui.debug(_("found new changesets starting at ") + | |
1406 | " ".join([short(f) for f in fetch]) + "\n") |
|
1406 | " ".join([short(f) for f in fetch]) + "\n") | |
1407 |
|
1407 | |||
1408 | self.ui.debug(_("%d total queries\n") % reqcnt) |
|
1408 | self.ui.debug(_("%d total queries\n") % reqcnt) | |
1409 |
|
1409 | |||
1410 | return base.keys(), fetch.keys(), heads |
|
1410 | return base.keys(), fetch.keys(), heads | |
1411 |
|
1411 | |||
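The three-part result built above is exactly what pull() further down consumes. A hedged usage sketch; the repository paths are hypothetical, and remote is any peer speaking the heads/branches/between protocol used above:

    from mercurial import hg, ui
    from mercurial.node import nullid, short

    myui = ui.ui()
    repo = hg.repository(myui, '/tmp/demo')                       # hypothetical local repo
    remote = hg.repository(myui, 'http://example.com/hg/demo')    # hypothetical peer

    common, fetch, rheads = repo.findcommonincoming(remote)
    if not fetch:
        print 'no changes found'          # pull() prints the same in this case
    elif fetch == [nullid]:
        print 'requesting all changes'    # local repo empty (or unrelated, with force)
    else:
        print 'missing subsets start at:', ' '.join([short(n) for n in fetch])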
1412 | def findoutgoing(self, remote, base=None, heads=None, force=False): |
|
1412 | def findoutgoing(self, remote, base=None, heads=None, force=False): | |
1413 | """Return list of nodes that are roots of subsets not in remote |
|
1413 | """Return list of nodes that are roots of subsets not in remote | |
1414 |
|
1414 | |||
1415 | If base dict is specified, assume that these nodes and their parents |
|
1415 | If base dict is specified, assume that these nodes and their parents | |
1416 | exist on the remote side. |
|
1416 | exist on the remote side. | |
1417 | If a list of heads is specified, return only nodes which are heads |
|
1417 | If a list of heads is specified, return only nodes which are heads | |
1418 | or ancestors of these heads, and return a second element which |
|
1418 | or ancestors of these heads, and return a second element which | |
1419 | contains all remote heads which get new children. |
|
1419 | contains all remote heads which get new children. | |
1420 | """ |
|
1420 | """ | |
1421 | if base == None: |
|
1421 | if base == None: | |
1422 | base = {} |
|
1422 | base = {} | |
1423 | self.findincoming(remote, base, heads, force=force) |
|
1423 | self.findincoming(remote, base, heads, force=force) | |
1424 |
|
1424 | |||
1425 | self.ui.debug(_("common changesets up to ") |
|
1425 | self.ui.debug(_("common changesets up to ") | |
1426 | + " ".join(map(short, base.keys())) + "\n") |
|
1426 | + " ".join(map(short, base.keys())) + "\n") | |
1427 |
|
1427 | |||
1428 | remain = dict.fromkeys(self.changelog.nodemap) |
|
1428 | remain = dict.fromkeys(self.changelog.nodemap) | |
1429 |
|
1429 | |||
1430 | # prune everything remote has from the tree |
|
1430 | # prune everything remote has from the tree | |
1431 | del remain[nullid] |
|
1431 | del remain[nullid] | |
1432 | remove = base.keys() |
|
1432 | remove = base.keys() | |
1433 | while remove: |
|
1433 | while remove: | |
1434 | n = remove.pop(0) |
|
1434 | n = remove.pop(0) | |
1435 | if n in remain: |
|
1435 | if n in remain: | |
1436 | del remain[n] |
|
1436 | del remain[n] | |
1437 | for p in self.changelog.parents(n): |
|
1437 | for p in self.changelog.parents(n): | |
1438 | remove.append(p) |
|
1438 | remove.append(p) | |
1439 |
|
1439 | |||
1440 | # find every node whose parents have been pruned |
|
1440 | # find every node whose parents have been pruned | |
1441 | subset = [] |
|
1441 | subset = [] | |
1442 | # find every remote head that will get new children |
|
1442 | # find every remote head that will get new children | |
1443 | updated_heads = {} |
|
1443 | updated_heads = {} | |
1444 | for n in remain: |
|
1444 | for n in remain: | |
1445 | p1, p2 = self.changelog.parents(n) |
|
1445 | p1, p2 = self.changelog.parents(n) | |
1446 | if p1 not in remain and p2 not in remain: |
|
1446 | if p1 not in remain and p2 not in remain: | |
1447 | subset.append(n) |
|
1447 | subset.append(n) | |
1448 | if heads: |
|
1448 | if heads: | |
1449 | if p1 in heads: |
|
1449 | if p1 in heads: | |
1450 | updated_heads[p1] = True |
|
1450 | updated_heads[p1] = True | |
1451 | if p2 in heads: |
|
1451 | if p2 in heads: | |
1452 | updated_heads[p2] = True |
|
1452 | updated_heads[p2] = True | |
1453 |
|
1453 | |||
1454 | # this is the set of all roots we have to push |
|
1454 | # this is the set of all roots we have to push | |
1455 | if heads: |
|
1455 | if heads: | |
1456 | return subset, updated_heads.keys() |
|
1456 | return subset, updated_heads.keys() | |
1457 | else: |
|
1457 | else: | |
1458 | return subset |
|
1458 | return subset | |
1459 |
|
1459 | |||
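The pruning in findoutgoing() can be restated on a toy graph: start from every node we have, peel away base and all of its ancestors (what the remote is known to have), and the roots of whatever remains are the changesets a push has to start from. A self-contained sketch with made-up node names:

    # toy graph: node -> tuple of parents
    parents = {'a': (), 'b': ('a',), 'c': ('b',), 'd': ('b',), 'e': ('c', 'd')}
    base = ['c']                      # the remote is known to have 'c' (hence 'a' and 'b')

    remain = set(parents)
    remove = list(base)
    while remove:                     # prune the remote's closure from the tree
        n = remove.pop(0)
        if n in remain:
            remain.remove(n)
            remove.extend(parents[n])

    roots = [n for n in remain if not any(p in remain for p in parents[n])]
    print sorted(remain)   # -> ['d', 'e']  missing on the remote
    print roots            # -> ['d']       the root the push has to start from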
1460 | def pull(self, remote, heads=None, force=False): |
|
1460 | def pull(self, remote, heads=None, force=False): | |
1461 | lock = self.lock() |
|
1461 | lock = self.lock() | |
1462 | try: |
|
1462 | try: | |
1463 | common, fetch, rheads = self.findcommonincoming(remote, heads=heads, |
|
1463 | common, fetch, rheads = self.findcommonincoming(remote, heads=heads, | |
1464 | force=force) |
|
1464 | force=force) | |
1465 | if fetch == [nullid]: |
|
1465 | if fetch == [nullid]: | |
1466 | self.ui.status(_("requesting all changes\n")) |
|
1466 | self.ui.status(_("requesting all changes\n")) | |
1467 |
|
1467 | |||
1468 | if not fetch: |
|
1468 | if not fetch: | |
1469 | self.ui.status(_("no changes found\n")) |
|
1469 | self.ui.status(_("no changes found\n")) | |
1470 | return 0 |
|
1470 | return 0 | |
1471 |
|
1471 | |||
1472 | if heads is None and remote.capable('changegroupsubset'): |
|
1472 | if heads is None and remote.capable('changegroupsubset'): | |
1473 | heads = rheads |
|
1473 | heads = rheads | |
1474 |
|
1474 | |||
1475 | if heads is None: |
|
1475 | if heads is None: | |
1476 | cg = remote.changegroup(fetch, 'pull') |
|
1476 | cg = remote.changegroup(fetch, 'pull') | |
1477 | else: |
|
1477 | else: | |
1478 | if not remote.capable('changegroupsubset'): |
|
1478 | if not remote.capable('changegroupsubset'): | |
1479 | raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset.")) |
|
1479 | raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset.")) | |
1480 | cg = remote.changegroupsubset(fetch, heads, 'pull') |
|
1480 | cg = remote.changegroupsubset(fetch, heads, 'pull') | |
1481 | return self.addchangegroup(cg, 'pull', remote.url()) |
|
1481 | return self.addchangegroup(cg, 'pull', remote.url()) | |
1482 | finally: |
|
1482 | finally: | |
1483 | del lock |
|
1483 | del lock | |
1484 |
|
1484 | |||
1485 | def push(self, remote, force=False, revs=None): |
|
1485 | def push(self, remote, force=False, revs=None): | |
1486 | # there are two ways to push to remote repo: |
|
1486 | # there are two ways to push to remote repo: | |
1487 | # |
|
1487 | # | |
1488 | # addchangegroup assumes local user can lock remote |
|
1488 | # addchangegroup assumes local user can lock remote | |
1489 | # repo (local filesystem, old ssh servers). |
|
1489 | # repo (local filesystem, old ssh servers). | |
1490 | # |
|
1490 | # | |
1491 | # unbundle assumes local user cannot lock remote repo (new ssh |
|
1491 | # unbundle assumes local user cannot lock remote repo (new ssh | |
1492 | # servers, http servers). |
|
1492 | # servers, http servers). | |
1493 |
|
1493 | |||
1494 | if remote.capable('unbundle'): |
|
1494 | if remote.capable('unbundle'): | |
1495 | return self.push_unbundle(remote, force, revs) |
|
1495 | return self.push_unbundle(remote, force, revs) | |
1496 | return self.push_addchangegroup(remote, force, revs) |
|
1496 | return self.push_addchangegroup(remote, force, revs) | |
1497 |
|
1497 | |||
1498 | def prepush(self, remote, force, revs): |
|
1498 | def prepush(self, remote, force, revs): | |
1499 | base = {} |
|
1499 | base = {} | |
1500 | remote_heads = remote.heads() |
|
1500 | remote_heads = remote.heads() | |
1501 | inc = self.findincoming(remote, base, remote_heads, force=force) |
|
1501 | inc = self.findincoming(remote, base, remote_heads, force=force) | |
1502 |
|
1502 | |||
1503 | update, updated_heads = self.findoutgoing(remote, base, remote_heads) |
|
1503 | update, updated_heads = self.findoutgoing(remote, base, remote_heads) | |
1504 | if revs is not None: |
|
1504 | if revs is not None: | |
1505 | msng_cl, bases, heads = self.changelog.nodesbetween(update, revs) |
|
1505 | msng_cl, bases, heads = self.changelog.nodesbetween(update, revs) | |
1506 | else: |
|
1506 | else: | |
1507 | bases, heads = update, self.changelog.heads() |
|
1507 | bases, heads = update, self.changelog.heads() | |
1508 |
|
1508 | |||
1509 | if not bases: |
|
1509 | if not bases: | |
1510 | self.ui.status(_("no changes found\n")) |
|
1510 | self.ui.status(_("no changes found\n")) | |
1511 | return None, 1 |
|
1511 | return None, 1 | |
1512 | elif not force: |
|
1512 | elif not force: | |
1513 | # check if we're creating new remote heads |
|
1513 | # check if we're creating new remote heads | |
1514 | # to be a remote head after push, node must be either |
|
1514 | # to be a remote head after push, node must be either | |
1515 | # - unknown locally |
|
1515 | # - unknown locally | |
1516 | # - a local outgoing head descended from update |
|
1516 | # - a local outgoing head descended from update | |
1517 | # - a remote head that's known locally and not |
|
1517 | # - a remote head that's known locally and not | |
1518 | # ancestral to an outgoing head |
|
1518 | # ancestral to an outgoing head | |
1519 |
|
1519 | |||
1520 | warn = 0 |
|
1520 | warn = 0 | |
1521 |
|
1521 | |||
1522 | if remote_heads == [nullid]: |
|
1522 | if remote_heads == [nullid]: | |
1523 | warn = 0 |
|
1523 | warn = 0 | |
1524 | elif not revs and len(heads) > len(remote_heads): |
|
1524 | elif not revs and len(heads) > len(remote_heads): | |
1525 | warn = 1 |
|
1525 | warn = 1 | |
1526 | else: |
|
1526 | else: | |
1527 | newheads = list(heads) |
|
1527 | newheads = list(heads) | |
1528 | for r in remote_heads: |
|
1528 | for r in remote_heads: | |
1529 | if r in self.changelog.nodemap: |
|
1529 | if r in self.changelog.nodemap: | |
1530 | desc = self.changelog.heads(r, heads) |
|
1530 | desc = self.changelog.heads(r, heads) | |
1531 | l = [h for h in heads if h in desc] |
|
1531 | l = [h for h in heads if h in desc] | |
1532 | if not l: |
|
1532 | if not l: | |
1533 | newheads.append(r) |
|
1533 | newheads.append(r) | |
1534 | else: |
|
1534 | else: | |
1535 | newheads.append(r) |
|
1535 | newheads.append(r) | |
1536 | if len(newheads) > len(remote_heads): |
|
1536 | if len(newheads) > len(remote_heads): | |
1537 | warn = 1 |
|
1537 | warn = 1 | |
1538 |
|
1538 | |||
1539 | if warn: |
|
1539 | if warn: | |
1540 | self.ui.warn(_("abort: push creates new remote heads!\n")) |
|
1540 | self.ui.warn(_("abort: push creates new remote heads!\n")) | |
1541 | self.ui.status(_("(did you forget to merge?" |
|
1541 | self.ui.status(_("(did you forget to merge?" | |
1542 | " use push -f to force)\n")) |
|
1542 | " use push -f to force)\n")) | |
1543 | return None, 0 |
|
1543 | return None, 0 | |
1544 | elif inc: |
|
1544 | elif inc: | |
1545 | self.ui.warn(_("note: unsynced remote changes!\n")) |
|
1545 | self.ui.warn(_("note: unsynced remote changes!\n")) | |
1546 |
|
1546 | |||
1547 |
|
1547 | |||
1548 | if revs is None: |
|
1548 | if revs is None: | |
1549 | cg = self.changegroup(update, 'push') |
|
1549 | cg = self.changegroup(update, 'push') | |
1550 | else: |
|
1550 | else: | |
1551 | cg = self.changegroupsubset(update, revs, 'push') |
|
1551 | cg = self.changegroupsubset(update, revs, 'push') | |
1552 | return cg, remote_heads |
|
1552 | return cg, remote_heads | |
1553 |
|
1553 | |||
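The force-less branch above warns when a push would increase the number of heads on the remote. A toy restatement of that check (it deliberately ignores the case of remote heads we do not know locally, which the real code simply keeps as heads):

    def creates_new_heads(local_heads, remote_heads, descends_from):
        # every remote head that none of our outgoing heads descends from will
        # still be a head after the push, so the remote head count grows
        newheads = list(local_heads)
        for r in remote_heads:
            if not any(descends_from(h, r) for h in local_heads):
                newheads.append(r)
        return len(newheads) > len(remote_heads)

    # one remote head r1; local head h1 descends from it, h2 does not
    descends = lambda h, r: (h, r) in [('h1', 'r1')]
    print creates_new_heads(['h1', 'h2'], ['r1'], descends)   # -> True, hg would warn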
1554 | def push_addchangegroup(self, remote, force, revs): |
|
1554 | def push_addchangegroup(self, remote, force, revs): | |
1555 | lock = remote.lock() |
|
1555 | lock = remote.lock() | |
1556 | try: |
|
1556 | try: | |
1557 | ret = self.prepush(remote, force, revs) |
|
1557 | ret = self.prepush(remote, force, revs) | |
1558 | if ret[0] is not None: |
|
1558 | if ret[0] is not None: | |
1559 | cg, remote_heads = ret |
|
1559 | cg, remote_heads = ret | |
1560 | return remote.addchangegroup(cg, 'push', self.url()) |
|
1560 | return remote.addchangegroup(cg, 'push', self.url()) | |
1561 | return ret[1] |
|
1561 | return ret[1] | |
1562 | finally: |
|
1562 | finally: | |
1563 | del lock |
|
1563 | del lock | |
1564 |
|
1564 | |||
1565 |     def push_unbundle(self, remote, force, revs):
1566 |         # local repo finds heads on server, finds out what revs it
1567 |         # must push.  once revs transferred, if server finds it has
1568 |         # different heads (someone else won commit/push race), server
1569 |         # aborts.
1570 |
1571 |         ret = self.prepush(remote, force, revs)
1572 |         if ret[0] is not None:
1573 |             cg, remote_heads = ret
1574 |             if force: remote_heads = ['force']
1575 |             return remote.unbundle(cg, remote_heads, 'push')
1576 |         return ret[1]
1577 |
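The two helpers above differ only in how the changegroup reaches the remote side: push_unbundle sends it over the wire protocol and lets the server re-check its heads atomically, while push_addchangegroup locks a directly reachable peer and adds the group itself. A minimal sketch of how a caller might choose between them; the push() driver is not part of this hunk, so its exact shape here is an assumption:

    # illustrative sketch only -- not the actual push() driver from this file
    def push(self, remote, force=False, revs=None):
        if remote.capable('unbundle'):
            # wire-protocol push: the server re-checks its heads atomically
            return self.push_unbundle(remote, force, revs)
        # otherwise lock the remote repository and add the changegroup directly
        return self.push_addchangegroup(remote, force, revs)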
1578 |     def changegroupinfo(self, nodes, source):
1579 |         if self.ui.verbose or source == 'bundle':
1580 |             self.ui.status(_("%d changesets found\n") % len(nodes))
1581 |         if self.ui.debugflag:
1582 |             self.ui.debug(_("List of changesets:\n"))
1583 |             for node in nodes:
1584 |                 self.ui.debug("%s\n" % hex(node))
1585 |
1586 |     def changegroupsubset(self, bases, heads, source, extranodes=None):
1587 |         """This function generates a changegroup consisting of all the nodes
1588 |         that are descendants of any of the bases, and ancestors of any of
1589 |         the heads.
1590 |
1591 |         It is fairly complex as determining which filenodes and which
1592 |         manifest nodes need to be included for the changeset to be complete
1593 |         is non-trivial.
1594 |
1595 |         Another wrinkle is doing the reverse, figuring out which changeset in
1596 |         the changegroup a particular filenode or manifestnode belongs to.
1597 |
1598 |         The caller can specify some nodes that must be included in the
1599 |         changegroup using the extranodes argument.  It should be a dict
1600 |         where the keys are the filenames (or 1 for the manifest), and the
1601 |         values are lists of (node, linknode) tuples, where node is a wanted
1602 |         node and linknode is the changelog node that should be transmitted as
1603 |         the linkrev.
1604 |         """
1605 |
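To make the extranodes contract above concrete, a hypothetical mapping could look like the following; every identifier in it is an invented placeholder (real values are binary nodeids), and the key 1 selects the manifest, as the docstring states:

    # hypothetical shape only -- filenode/linknode/manifestnode are placeholders
    extranodes = {
        'foo/bar.txt': [(filenode, linknode)],       # extra filelog nodes for a file
        1: [(manifestnode, manifestlinknode)],       # extra manifest nodes
    }
    cg = repo.changegroupsubset(bases, heads, 'push', extranodes=extranodes)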
1606 |         if extranodes is None:
1607 |             # can we go through the fast path ?
1608 |             heads.sort()
1609 |             allheads = self.heads()
1610 |             allheads.sort()
1611 |             if heads == allheads:
1612 |                 common = []
1613 |                 # parents of bases are known from both sides
1614 |                 for n in bases:
1615 |                     for p in self.changelog.parents(n):
1616 |                         if p != nullid:
1617 |                             common.append(p)
1618 |                 return self._changegroup(common, source)
1619 |
1620 |         self.hook('preoutgoing', throw=True, source=source)
1621 |
1622 |         # Set up some initial variables
1623 |         # Make it easy to refer to self.changelog
1624 |         cl = self.changelog
1625 |         # msng is short for missing - compute the list of changesets in this
1626 |         # changegroup.
1627 |         msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1628 |         self.changegroupinfo(msng_cl_lst, source)
1629 |         # Some bases may turn out to be superfluous, and some heads may be
1630 |         # too.  nodesbetween will return the minimal set of bases and heads
1631 |         # necessary to re-create the changegroup.
1632 |
1633 |         # Known heads are the list of heads that it is assumed the recipient
1634 |         # of this changegroup will know about.
1635 |         knownheads = {}
1636 |         # We assume that all parents of bases are known heads.
1637 |         for n in bases:
1638 |             for p in cl.parents(n):
1639 |                 if p != nullid:
1640 |                     knownheads[p] = 1
1641 |         knownheads = knownheads.keys()
1642 |         if knownheads:
1643 |             # Now that we know what heads are known, we can compute which
1644 |             # changesets are known.  The recipient must know about all
1645 |             # changesets required to reach the known heads from the null
1646 |             # changeset.
1647 |             has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1648 |             junk = None
1649 |             # Transform the list into an ersatz set.
1650 |             has_cl_set = dict.fromkeys(has_cl_set)
1651 |         else:
1652 |             # If there were no known heads, the recipient cannot be assumed to
1653 |             # know about any changesets.
1654 |             has_cl_set = {}
1655 |
1656 |         # Make it easy to refer to self.manifest
1657 |         mnfst = self.manifest
1658 |         # We don't know which manifests are missing yet
1659 |         msng_mnfst_set = {}
1660 |         # Nor do we know which filenodes are missing.
1661 |         msng_filenode_set = {}
1662 |
1663 |         junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1664 |         junk = None
1665 |
1666 |         # A changeset always belongs to itself, so the changenode lookup
1667 |         # function for a changenode is identity.
1668 |         def identity(x):
1669 |             return x
1670 |
1671 |         # A function generating function.  Sets up an environment for the
1672 |         # inner function.
1673 |         def cmp_by_rev_func(revlog):
1674 |             # Compare two nodes by their revision number in the environment's
1675 |             # revision history.  Since the revision number both represents the
1676 |             # most efficient order to read the nodes in, and represents a
1677 |             # topological sorting of the nodes, this function is often useful.
1678 |             def cmp_by_rev(a, b):
1679 |                 return cmp(revlog.rev(a), revlog.rev(b))
1680 |             return cmp_by_rev
1681 |
1682 |         # If we determine that a particular file or manifest node must be a
1683 |         # node that the recipient of the changegroup will already have, we can
1684 |         # also assume the recipient will have all the parents.  This function
1685 |         # prunes them from the set of missing nodes.
1686 |         def prune_parents(revlog, hasset, msngset):
1687 |             haslst = hasset.keys()
1688 |             haslst.sort(cmp_by_rev_func(revlog))
1689 |             for node in haslst:
1690 |                 parentlst = [p for p in revlog.parents(node) if p != nullid]
1691 |                 while parentlst:
1692 |                     n = parentlst.pop()
1693 |                     if n not in hasset:
1694 |                         hasset[n] = 1
1695 |                         p = [p for p in revlog.parents(n) if p != nullid]
1696 |                         parentlst.extend(p)
1697 |             for n in hasset:
1698 |                 msngset.pop(n, None)
1699 |
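cmp_by_rev_func is a comparator factory: it closes over one revlog and returns a Python 2 cmp-style function for list.sort(), which prune_parents uses just above. The same pattern in a standalone form, with illustrative names that are not taken from this file:

    # standalone sketch of the comparator-factory pattern
    def by_rev(revlog):
        def compare(a, b):
            return cmp(revlog.rev(a), revlog.rev(b))   # Python 2 cmp-style comparator
        return compare

    # usage: order nodes by their revision number in that revlog
    # nodes.sort(by_rev(repo.changelog))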
1700 |         # This is a function generating function used to set up an environment
1701 |         # for the inner function to execute in.
1702 |         def manifest_and_file_collector(changedfileset):
1703 |             # This is an information gathering function that gathers
1704 |             # information from each changeset node that goes out as part of
1705 |             # the changegroup.  The information gathered is a list of which
1706 |             # manifest nodes are potentially required (the recipient may
1707 |             # already have them) and the total list of all files which were
1708 |             # changed in any changeset in the changegroup.
1709 |             #
1710 |             # We also remember the first changenode we saw any manifest
1711 |             # referenced by so we can later determine which changenode 'owns'
1712 |             # the manifest.
1713 |             def collect_manifests_and_files(clnode):
1714 |                 c = cl.read(clnode)
1715 |                 for f in c[3]:
1716 |                     # This is to make sure we only have one instance of each
1717 |                     # filename string for each filename.
1718 |                     changedfileset.setdefault(f, f)
1719 |                 msng_mnfst_set.setdefault(c[0], clnode)
1720 |             return collect_manifests_and_files
1721 |
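The indices used by the collector assume the decoded changelog entry layout of this era (manifest, user, date, files, description, extra); that layout is stated here as an assumption rather than taken from this hunk. A short sketch of the two fields the collector actually touches:

    # sketch of the fields this collector relies on
    c = cl.read(clnode)
    manifest_node = c[0]   # nodeid of the manifest this changeset points to
    changed_files = c[3]   # list of file names touched by this changeset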
1722 |         # Figure out which manifest nodes (of the ones we think might be part
1723 |         # of the changegroup) the recipient must know about and remove them
1724 |         # from the changegroup.
1725 |         def prune_manifests():
1726 |             has_mnfst_set = {}
1727 |             for n in msng_mnfst_set:
1728 |                 # If a 'missing' manifest thinks it belongs to a changenode
1729 |                 # the recipient is assumed to have, obviously the recipient
1730 |                 # must have that manifest.
1731 |                 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1732 |                 if linknode in has_cl_set:
1733 |                     has_mnfst_set[n] = 1
1734 |             prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1735 |
1736 |         # Use the information collected in collect_manifests_and_files to say
1737 |         # which changenode any manifestnode belongs to.
1738 |         def lookup_manifest_link(mnfstnode):
1739 |             return msng_mnfst_set[mnfstnode]
1740 |
1741 |         # A function generating function that sets up the initial environment
1742 |         # for the inner function.
1743 |         def filenode_collector(changedfiles):
1744 |             next_rev = [0]
1745 |             # This gathers information from each manifestnode included in the
1746 |             # changegroup about which filenodes the manifest node references
1747 |             # so we can include those in the changegroup too.
1748 |             #
1749 |             # It also remembers which changenode each filenode belongs to.  It
1750 |             # does this by assuming that a filenode belongs to the changenode
1751 |             # the first manifest that references it belongs to.
1752 |             def collect_msng_filenodes(mnfstnode):
1753 |                 r = mnfst.rev(mnfstnode)
1754 |                 if r == next_rev[0]:
1755 |                     # If the last rev we looked at was the one just previous,
1756 |                     # we only need to see a diff.
1757 |                     deltamf = mnfst.readdelta(mnfstnode)
1758 |                     # For each line in the delta
1759 |                     for f, fnode in deltamf.items():
1760 |                         f = changedfiles.get(f, None)
1761 |                         # And if the file is in the list of files we care
1762 |                         # about.
1763 |                         if f is not None:
1764 |                             # Get the changenode this manifest belongs to
1765 |                             clnode = msng_mnfst_set[mnfstnode]
1766 |                             # Create the set of filenodes for the file if
1767 |                             # there isn't one already.
1768 |                             ndset = msng_filenode_set.setdefault(f, {})
1769 |                             # And set the filenode's changelog node to the
1770 |                             # manifest's if it hasn't been set already.
1771 |                             ndset.setdefault(fnode, clnode)
1772 |                 else:
1773 |                     # Otherwise we need a full manifest.
1774 |                     m = mnfst.read(mnfstnode)
1775 |                     # For every file we care about.
1776 |                     for f in changedfiles:
1777 |                         fnode = m.get(f, None)
1778 |                         # If it's in the manifest
1779 |                         if fnode is not None:
1780 |                             # See comments above.
1781 |                             clnode = msng_mnfst_set[mnfstnode]
1782 |                             ndset = msng_filenode_set.setdefault(f, {})
1783 |                             ndset.setdefault(fnode, clnode)
1784 |                 # Remember the revision we hope to see next.
1785 |                 next_rev[0] = r + 1
1786 |             return collect_msng_filenodes
1787 |
1788 |         # We have a list of filenodes we think we need for a file, let's remove
1789 |         # all those we know the recipient must have.
1790 |         def prune_filenodes(f, filerevlog):
1791 |             msngset = msng_filenode_set[f]
1792 |             hasset = {}
1793 |             # If a 'missing' filenode thinks it belongs to a changenode we
1794 |             # assume the recipient must have, then the recipient must have
1795 |             # that filenode.
1796 |             for n in msngset:
1797 |                 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1798 |                 if clnode in has_cl_set:
1799 |                     hasset[n] = 1
1800 |             prune_parents(filerevlog, hasset, msngset)
1801 |
1802 |         # A function generator function that sets up a context for the
1803 |         # inner function.
1804 |         def lookup_filenode_link_func(fname):
1805 |             msngset = msng_filenode_set[fname]
1806 |             # Look up the changenode the filenode belongs to.
1807 |             def lookup_filenode_link(fnode):
1808 |                 return msngset[fnode]
1809 |             return lookup_filenode_link
1810 |
1811 |         # Add the nodes that were explicitly requested.
1812 |         def add_extra_nodes(name, nodes):
1813 |             if not extranodes or name not in extranodes:
1814 |                 return
1815 |
1816 |             for node, linknode in extranodes[name]:
1817 |                 if node not in nodes:
1818 |                     nodes[node] = linknode
1819 |
1820 |         # Now that we have all these utility functions to help out and
1821 |         # logically divide up the task, generate the group.
1822 |         def gengroup():
1823 |             # The set of changed files starts empty.
1824 |             changedfiles = {}
1825 |             # Create a changenode group generator that will call our functions
1826 |             # back to lookup the owning changenode and collect information.
1827 |             group = cl.group(msng_cl_lst, identity,
1828 |                              manifest_and_file_collector(changedfiles))
1829 |             for chnk in group:
1830 |                 yield chnk
1831 |
1832 |             # The list of manifests has been collected by the generator
1833 |             # calling our functions back.
1834 |             prune_manifests()
1835 |             add_extra_nodes(1, msng_mnfst_set)
1836 |             msng_mnfst_lst = msng_mnfst_set.keys()
1837 |             # Sort the manifestnodes by revision number.
1838 |             msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1839 |             # Create a generator for the manifestnodes that calls our lookup
1840 |             # and data collection functions back.
1841 |             group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1842 |                                 filenode_collector(changedfiles))
1843 |             for chnk in group:
1844 |                 yield chnk
1845 |
1846 |             # These are no longer needed, dereference and toss the memory for
1847 |             # them.
1848 |             msng_mnfst_lst = None
1849 |             msng_mnfst_set.clear()
1850 |
1851 |             if extranodes:
1852 |                 for fname in extranodes:
1853 |                     if isinstance(fname, int):
1854 |                         continue
1855 |                     msng_filenode_set.setdefault(fname, {})
1856 |                     changedfiles[fname] = 1
1857 |             # Go through all our files in order sorted by name.
1858 |             for fname in util.sort(changedfiles):
1859 |                 filerevlog = self.file(fname)
1860 |                 if not len(filerevlog):
1861 |                     raise util.Abort(_("empty or missing revlog for %s") % fname)
1862 |                 # Toss out the filenodes that the recipient isn't really
1863 |                 # missing.
1864 |                 if fname in msng_filenode_set:
1865 |                     prune_filenodes(fname, filerevlog)
1866 |                     add_extra_nodes(fname, msng_filenode_set[fname])
1867 |                     msng_filenode_lst = msng_filenode_set[fname].keys()
1868 |                 else:
1869 |                     msng_filenode_lst = []
1870 |                 # If any filenodes are left, generate the group for them,
1871 |                 # otherwise don't bother.
1872 |                 if len(msng_filenode_lst) > 0:
1873 |                     yield changegroup.chunkheader(len(fname))
1874 |                     yield fname
1875 |                     # Sort the filenodes by their revision #
1876 |                     msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1877 |                     # Create a group generator and only pass in a changenode
1878 |                     # lookup function as we need to collect no information
1879 |                     # from filenodes.
1880 |                     group = filerevlog.group(msng_filenode_lst,
1881 |                                              lookup_filenode_link_func(fname))
1882 |                     for chnk in group:
1883 |                         yield chnk
1884 |                 if fname in msng_filenode_set:
1885 |                     # Don't need this anymore, toss it to free memory.
1886 |                     del msng_filenode_set[fname]
1887 |             # Signal that no more groups are left.
1888 |             yield changegroup.closechunk()
1889 |
1890 |             if msng_cl_lst:
1891 |                 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1892 |
1893 |         return util.chunkbuffer(gengroup())
1894 |
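changegroupsubset() therefore emits a changelog group, a manifest group, one named group per changed file, and a closing chunk, all wrapped by util.chunkbuffer into a file-like stream. A hedged sketch of reading that stream back, mirroring the order addchangegroup() uses further down:

    # sketch: consuming the stream produced above
    cg = repo.changegroupsubset(bases, heads, 'push')
    for chunk in changegroup.chunkiter(cg):        # changelog group
        pass
    for chunk in changegroup.chunkiter(cg):        # manifest group
        pass
    while True:
        fname = changegroup.getchunk(cg)           # empty chunk ends the file groups
        if not fname:
            break
        for chunk in changegroup.chunkiter(cg):    # one group per changed file
            pass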
1895 |     def changegroup(self, basenodes, source):
1896 |         # to avoid a race we use changegroupsubset() (issue1320)
1897 |         return self.changegroupsubset(basenodes, self.heads(), source)
1898 |
1899 |     def _changegroup(self, common, source):
1900 |         """Generate a changegroup of all nodes that we have that a recipient
1901 |         doesn't.
1902 |
1903 |         This is much easier than the previous function as we can assume that
1904 |         the recipient has any changenode we aren't sending them.
1905 |
1906 |         common is the set of common nodes between remote and self"""
1907 |
1908 |         self.hook('preoutgoing', throw=True, source=source)
1909 |
1910 |         cl = self.changelog
1911 |         nodes = cl.findmissing(common)
1912 |         revset = dict.fromkeys([cl.rev(n) for n in nodes])
1913 |         self.changegroupinfo(nodes, source)
1914 |
1915 |         def identity(x):
1916 |             return x
1917 |
1918 |         def gennodelst(log):
1919 |             for r in log:
1920 |                 if log.linkrev(r) in revset:
1921 |                     yield log.node(r)
1922 |
1923 |         def changed_file_collector(changedfileset):
1924 |             def collect_changed_files(clnode):
1925 |                 c = cl.read(clnode)
1926 |                 for fname in c[3]:
1927 |                     changedfileset[fname] = 1
1928 |             return collect_changed_files
1929 |
1930 |         def lookuprevlink_func(revlog):
1931 |             def lookuprevlink(n):
1932 |                 return cl.node(revlog.linkrev(revlog.rev(n)))
1933 |             return lookuprevlink
1934 |
1935 |         def gengroup():
1936 |             # construct a list of all changed files
1937 |             changedfiles = {}
1938 |
1939 |             for chnk in cl.group(nodes, identity,
1940 |                                  changed_file_collector(changedfiles)):
1941 |                 yield chnk
1942 |
1943 |             mnfst = self.manifest
1944 |             nodeiter = gennodelst(mnfst)
1945 |             for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1946 |                 yield chnk
1947 |
1948 |             for fname in util.sort(changedfiles):
1949 |                 filerevlog = self.file(fname)
1950 |                 if not len(filerevlog):
1951 |                     raise util.Abort(_("empty or missing revlog for %s") % fname)
1952 |                 nodeiter = gennodelst(filerevlog)
1953 |                 nodeiter = list(nodeiter)
1954 |                 if nodeiter:
1955 |                     yield changegroup.chunkheader(len(fname))
1956 |                     yield fname
1957 |                     lookup = lookuprevlink_func(filerevlog)
1958 |                     for chnk in filerevlog.group(nodeiter, lookup):
1959 |                         yield chnk
1960 |
1961 |             yield changegroup.closechunk()
1962 |
1963 |             if nodes:
1964 |                 self.hook('outgoing', node=hex(nodes[0]), source=source)
1965 |
1966 |         return util.chunkbuffer(gengroup())
1967 |
1968 |     def addchangegroup(self, source, srctype, url, emptyok=False):
1969 |         """add changegroup to repo.
1970 |
1971 |         return values:
1972 |         - nothing changed or no source: 0
1973 |         - more heads than before: 1+added heads (2..n)
1974 |         - fewer heads than before: -1-removed heads (-2..-n)
1975 |         - number of heads stays the same: 1
1976 |         """
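Read against the docstring, the return value packs both the direction and the count of the head change. A small sketch of decoding it; the variable names are illustrative only:

    # sketch: interpreting addchangegroup()'s return value r
    if r == 0:
        pass                    # nothing changed, or no source
    elif r > 1:
        heads_added = r - 1     # e.g. r == 3 means two new heads
    elif r < 0:
        heads_removed = -r - 1  # e.g. r == -2 means one head went away
    else:                       # r == 1
        pass                    # changesets added, head count unchanged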
1977 |         def csmap(x):
1978 |             self.ui.debug(_("add changeset %s\n") % short(x))
1979 |             return len(cl)
1980 |
1981 |         def revmap(x):
1982 |             return cl.rev(x)
1983 |
1984 |         if not source:
1985 |             return 0
1986 |
1987 |         self.hook('prechangegroup', throw=True, source=srctype, url=url)
1988 |
1989 |         changesets = files = revisions = 0
1990 |
1991 |         # write changelog data to temp files so concurrent readers will not see
1992 |         # inconsistent view
1993 |         cl = self.changelog
1994 |         cl.delayupdate()
1995 |         oldheads = len(cl.heads())
1996 |
1997 |         tr = self.transaction()
1998 |         try:
1999 |             trp = weakref.proxy(tr)
2000 |             # pull off the changeset group
2001 |             self.ui.status(_("adding changesets\n"))
2002 |             cor = len(cl) - 1
2003 |             chunkiter = changegroup.chunkiter(source)
2004 |             if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2005 |                 raise util.Abort(_("received changelog group is empty"))
2006 |             cnr = len(cl) - 1
2007 |             changesets = cnr - cor
2008 |
2009 |             # pull off the manifest group
2010 |             self.ui.status(_("adding manifests\n"))
2011 |             chunkiter = changegroup.chunkiter(source)
2012 |             # no need to check for empty manifest group here:
2013 |             # if the result of the merge of 1 and 2 is the same in 3 and 4,
2014 |             # no new manifest will be created and the manifest group will
2015 |             # be empty during the pull
2016 |             self.manifest.addgroup(chunkiter, revmap, trp)
2017 |
2018 |             # process the files
2019 |             self.ui.status(_("adding file changes\n"))
2020 |             while 1:
2021 |                 f = changegroup.getchunk(source)
2022 |                 if not f:
2023 |                     break
2024 |                 self.ui.debug(_("adding %s revisions\n") % f)
2025 |                 fl = self.file(f)
2026 |                 o = len(fl)
2027 |                 chunkiter = changegroup.chunkiter(source)
2028 |                 if fl.addgroup(chunkiter, revmap, trp) is None:
2029 |                     raise util.Abort(_("received file revlog group is empty"))
2030 |                 revisions += len(fl) - o
2031 |                 files += 1
2032 |
2033 |             # make changelog see real files again
2034 |             cl.finalize(trp)
2035 |
2036 |             newheads = len(self.changelog.heads())
2037 |             heads = ""
2038 |             if oldheads and newheads != oldheads:
2039 |                 heads = _(" (%+d heads)") % (newheads - oldheads)
2040 |
2041 |             self.ui.status(_("added %d changesets"
2042 |                              " with %d changes to %d files%s\n")
2043 |                            % (changesets, revisions, files, heads))
2044 |
2045 |             if changesets > 0:
2046 |                 self.hook('pretxnchangegroup', throw=True,
2047 |                           node=hex(self.changelog.node(cor+1)), source=srctype,
2048 |                           url=url)
2049 |
2050 |             tr.close()
2051 |         finally:
2052 |             del tr
2053 |
2054 |         if changesets > 0:
2055 |             # forcefully update the on-disk branch cache
2056 |             self.ui.debug(_("updating the branch cache\n"))
2057 |             self.branchtags()
2058 |             self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2059 |                       source=srctype, url=url)
2060 |
2061 |             for i in xrange(cor + 1, cnr + 1):
2062 |                 self.hook("incoming", node=hex(self.changelog.node(i)),
2063 |                           source=srctype, url=url)
2064 |
2065 |         # never return 0 here:
2066 |         if newheads < oldheads:
2067 |             return newheads - oldheads - 1
2068 |         else:
2069 |             return newheads - oldheads + 1
2070 |
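For orientation, the method above fires 'prechangegroup' before any data is read, 'pretxnchangegroup' inside the transaction once changesets are added, and then, after the transaction closes, 'changegroup' once plus 'incoming' once per new changeset. A possible hgrc snippet wiring handlers to those hooks; the handler commands are placeholders, not values from this changeset:

    # illustrative hgrc snippet (handler commands are placeholders)
    [hooks]
    prechangegroup = echo "about to add a changegroup"
    pretxnchangegroup = /path/to/validate-incoming
    changegroup = echo "changegroup added"
    incoming = echo "new changeset added"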
2071 |
2072 |     def stream_in(self, remote):
2073 |         fp = remote.stream_out()
2074 |         l = fp.readline()
2075 |         try:
2076 |             resp = int(l)
2077 |         except ValueError:
2078 |             raise util.UnexpectedOutput(
2079 |                 _('Unexpected response from remote server:'), l)
2080 |         if resp == 1:
2081 |             raise util.Abort(_('operation forbidden by server'))
2082 |         elif resp == 2:
2083 |             raise util.Abort(_('locking the remote repository failed'))
2084 |         elif resp != 0:
2085 |             raise util.Abort(_('the server sent an unknown error code'))
2086 |         self.ui.status(_('streaming all changes\n'))
2087 |         l = fp.readline()
2088 |         try:
2089 |             total_files, total_bytes = map(int, l.split(' ', 1))
2090 |         except (ValueError, TypeError):
2091 |             raise util.UnexpectedOutput(
2092 |                 _('Unexpected response from remote server:'), l)
2093 |         self.ui.status(_('%d files to transfer, %s of data\n') %
2094 |                        (total_files, util.bytecount(total_bytes)))
2095 |         start = time.time()
2096 |         for i in xrange(total_files):
2097 |             # XXX doesn't support '\n' or '\r' in filenames
2098 |             l = fp.readline()
2099 |             try:
2100 |                 name, size = l.split('\0', 1)
2101 |                 size = int(size)
2102 |             except (ValueError, TypeError):
2103 |                 raise util.UnexpectedOutput(
2104 |                     _('Unexpected response from remote server:'), l)
2105 |             self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2106 |             ofp = self.sopener(name, 'w')
2107 |             for chunk in util.filechunkiter(fp, limit=size):
2108 |                 ofp.write(chunk)
2109 |             ofp.close()
2110 |         elapsed = time.time() - start
2111 |         if elapsed <= 0:
2112 |             elapsed = 0.001
2113 |         self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2114 |                        (util.bytecount(total_bytes), elapsed,
2115 |                         util.bytecount(total_bytes / elapsed)))
2116 |         self.invalidate()
2117 |         return len(self.heads()) + 1
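As parsed above, the stream_out wire format is line-oriented: a numeric status line, a '<file count> <byte count>' line, then for each file a '<name>\0<size>' header followed by exactly that many bytes of raw store data. An invented transcript, annotated as comments (names and sizes are placeholders):

    # status line:      "0\n"                       (0 = OK, 1 = forbidden, 2 = lock failed)
    # summary line:     "2 8192\n"                  (2 files, 8192 bytes total)
    # per-file header:  "data/foo.txt.i\x005120\n"  followed by 5120 bytes of revlog data
    # per-file header:  "00manifest.i\x003072\n"    followed by 3072 bytes of revlog data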
2118 |
2119 |     def clone(self, remote, heads=[], stream=False):
2120 |         '''clone remote repository.
2121 |
2122 |         keyword arguments:
2123 |         heads: list of revs to clone (forces use of pull)
2124 |         stream: use streaming clone if possible'''
2125 |
2126 |         # now, all clients that can request uncompressed clones can
2127 |         # read repo formats supported by all servers that can serve
2128 |         # them.
2129 |
2130 |         # if revlog format changes, client will have to check version
2131 |         # and format flags on "stream" capability, and use
2132 |         # uncompressed only if compatible.
2133 |
2134 |         if stream and not heads and remote.capable('stream'):
2135 |             return self.stream_in(remote)
2136 |         return self.pull(remote, heads)
2137 |
2138 | # used to avoid circular references so destructors work
2139 | def aftertrans(files):
2140 |     renamefiles = [tuple(t) for t in files]
2141 |     def a():
2142 |         for src, dest in renamefiles:
2143 |             util.rename(src, dest)
2144 |     return a
2145 |
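aftertrans returns a plain closure rather than a bound method, so the transaction's after-callback keeps no reference back to the repository object. A tiny usage sketch; the journal/undo names reflect how this callback is typically used, and are not values taken from this hunk:

    # sketch: the returned closure performs the queued renames when called
    after = aftertrans([('journal', 'undo'), ('journal.dirstate', 'undo.dirstate')])
    after()   # renames journal -> undo and journal.dirstate -> undo.dirstate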
2146 | def instance(ui, path, create):
2147 |     return localrepository(ui, util.drop_scheme('file', path), create)
2148 |
2149 | def islocal(path):
2150 |     return True