Show More
@@ -1,1286 +1,1308 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | # queue.py - patch queues for mercurial |
|
2 | # queue.py - patch queues for mercurial | |
3 | # |
|
3 | # | |
4 | # Copyright 2005 Chris Mason <mason@suse.com> |
|
4 | # Copyright 2005 Chris Mason <mason@suse.com> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms |
|
6 | # This software may be used and distributed according to the terms | |
7 | # of the GNU General Public License, incorporated herein by reference. |
|
7 | # of the GNU General Public License, incorporated herein by reference. | |
8 |
|
8 | |||
9 | from mercurial.demandload import * |
|
9 | from mercurial.demandload import * | |
10 | demandload(globals(), "os sys re struct traceback errno bz2") |
|
10 | demandload(globals(), "os sys re struct traceback errno bz2") | |
11 | from mercurial.i18n import gettext as _ |
|
11 | from mercurial.i18n import gettext as _ | |
12 | from mercurial import ui, hg, revlog, commands, util |
|
12 | from mercurial import ui, hg, revlog, commands, util | |
13 |
|
13 | |||
# Version of the mq (patch queue) extension.
versionstr = "0.45"

# Map of repository -> queue state; presumably populated by the command
# wrappers further down this file — TODO confirm, not visible in this chunk.
repomap = {}
18 | class queue: |
|
18 | class queue: | |
    def __init__(self, ui, path, patchdir=None):
        """Initialize queue state rooted at *path*.

        ui       -- mercurial ui object used for all warnings/output
        path     -- base directory; patches live in <path>/patches unless
                    *patchdir* overrides that
        patchdir -- optional explicit patch directory

        Loads the 'series' and 'status' files if they exist; otherwise
        the queue starts out empty.
        """
        self.opener = util.opener(path)
        self.basepath = path
        if patchdir:
            self.path = patchdir
        else:
            self.path = os.path.join(path, "patches")
        self.ui = ui
        self.applied = []        # "hexnode:patchname" entries, push order
        self.full_series = []    # raw series file lines (comments included)
        self.applied_dirty = 0   # set when self.applied needs re-writing
        self.series_dirty = 0    # set when self.full_series needs re-writing
        self.series_path = os.path.join(self.path, "series")
        self.status_path = os.path.join(self.path, "status")

        s = self.series_path
        if os.path.exists(s):
            self.full_series = self.opener(s).read().splitlines()
        # always parse, so self.series exists even with no series file
        self.read_series(self.full_series)

        s = self.status_path
        if os.path.exists(s):
            self.applied = self.opener(s).read().splitlines()
43 | def find_series(self, patch): |
|
43 | def find_series(self, patch): | |
44 | pre = re.compile("(\s*)([^#]+)") |
|
44 | pre = re.compile("(\s*)([^#]+)") | |
45 | index = 0 |
|
45 | index = 0 | |
46 | for l in self.full_series: |
|
46 | for l in self.full_series: | |
47 | m = pre.match(l) |
|
47 | m = pre.match(l) | |
48 | if m: |
|
48 | if m: | |
49 | s = m.group(2) |
|
49 | s = m.group(2) | |
50 | s = s.rstrip() |
|
50 | s = s.rstrip() | |
51 | if s == patch: |
|
51 | if s == patch: | |
52 | return index |
|
52 | return index | |
53 | index += 1 |
|
53 | index += 1 | |
54 | return None |
|
54 | return None | |
55 |
|
55 | |||
56 | def read_series(self, list): |
|
56 | def read_series(self, list): | |
57 | def matcher(list): |
|
57 | def matcher(list): | |
58 | pre = re.compile("(\s*)([^#]+)") |
|
58 | pre = re.compile("(\s*)([^#]+)") | |
59 | for l in list: |
|
59 | for l in list: | |
60 | m = pre.match(l) |
|
60 | m = pre.match(l) | |
61 | if m: |
|
61 | if m: | |
62 | s = m.group(2) |
|
62 | s = m.group(2) | |
63 | s = s.rstrip() |
|
63 | s = s.rstrip() | |
64 | if len(s) > 0: |
|
64 | if len(s) > 0: | |
65 | yield s |
|
65 | yield s | |
66 | self.series = [] |
|
66 | self.series = [] | |
67 | self.series = [ x for x in matcher(list) ] |
|
67 | self.series = [ x for x in matcher(list) ] | |
68 |
|
68 | |||
69 | def save_dirty(self): |
|
69 | def save_dirty(self): | |
70 | if self.applied_dirty: |
|
70 | if self.applied_dirty: | |
71 | if len(self.applied) > 0: |
|
71 | if len(self.applied) > 0: | |
72 | nl = "\n" |
|
72 | nl = "\n" | |
73 | else: |
|
73 | else: | |
74 | nl = "" |
|
74 | nl = "" | |
75 | f = self.opener(self.status_path, "w") |
|
75 | f = self.opener(self.status_path, "w") | |
76 | f.write("\n".join(self.applied) + nl) |
|
76 | f.write("\n".join(self.applied) + nl) | |
77 | if self.series_dirty: |
|
77 | if self.series_dirty: | |
78 | if len(self.full_series) > 0: |
|
78 | if len(self.full_series) > 0: | |
79 | nl = "\n" |
|
79 | nl = "\n" | |
80 | else: |
|
80 | else: | |
81 | nl = "" |
|
81 | nl = "" | |
82 | f = self.opener(self.series_path, "w") |
|
82 | f = self.opener(self.series_path, "w") | |
83 | f.write("\n".join(self.full_series) + nl) |
|
83 | f.write("\n".join(self.full_series) + nl) | |
84 |
|
84 | |||
    def readheaders(self, patch):
        """Parse the header portion of patch file *patch*.

        Returns a 4-tuple (message, comments, user, patchfound):
          message    -- commit-message lines (tags and diff stripped)
          comments   -- all pre-diff lines, verbatim
          user       -- author from "# User", "From:" or "from:" tags, or None
          patchfound -- True when an actual diff body ('--- ' followed by
                        '+++ ') was seen
        Raises IOError if the patch file cannot be opened.
        """
        def eatdiff(lines):
            # drop trailing diff-ish lines (diff headers, Index:, separators)
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank/whitespace-only lines
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = os.path.join(self.path, patch)
        message = []
        comments = []
        user = None
        format = None       # parser state: None / "hgpatch" / "tag" / "tagdone"
        subject = None
        diffstart = 0       # 0 = no diff yet, 1 = saw '--- ', 2 = saw '+++ '

        for line in file(pf):
            line = line.rstrip()
            if diffstart:
                # '--- ' must be immediately followed by '+++ ' to count
                # as a real diff; either way we stop reading headers here
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                   line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                   line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            else:
                message.append(line)
            # comments keeps every pre-diff line, tagged or not
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return (message, comments, user, diffstart > 1)
    def mergeone(self, repo, mergeq, head, patch, rev, wlock):
        """Bring *patch* (applied as *rev* in *mergeq*) into this queue.

        First tries a plain apply on top of *head*; if that fails, the
        partial application is stripped and the patch's revision is
        merged instead, then the patch file is regenerated from the
        merge result.  Returns (err, node).  Exits the process on
        unrecoverable errors (historical mq behavior).
        """
        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev, wlock=wlock)

        if err == 0:
            return (err, n)

        if n is None:
            self.ui.warn("apply failed for patch %s\n" % patch)
            sys.exit(1)

        self.ui.warn("patch didn't work out, merging %s\n" % patch)

        # apply failed, strip away that rev and merge.
        repo.update(head, allow=False, force=True, wlock=wlock)
        self.strip(repo, n, update=False, backup='strip', wlock=wlock)

        # merge rev in, then commit reusing its files (c[3]=files? here
        # c[4] and c[1] are passed — changelog entry fields; TODO confirm
        # they are (description, user) in this hg vintage)
        c = repo.changelog.read(rev)
        ret = repo.update(rev, allow=True, wlock=wlock)
        if ret:
            self.ui.warn("update returned %d\n" % ret)
            sys.exit(1)
        n = repo.commit(None, c[4], c[1], force=1, wlock=wlock)
        if n == None:
            self.ui.warn("repo commit failed\n")
            sys.exit(1)
        try:
            message, comments, user, patchfound = mergeq.readheaders(patch)
        except:
            self.ui.warn("Unable to read %s\n" % patch)
            sys.exit(1)

        # rewrite the patch file as the diff between head and the merge
        patchf = self.opener(os.path.join(self.path, patch), "w")
        if comments:
            comments = "\n".join(comments) + '\n\n'
            patchf.write(comments)
        commands.dodiff(patchf, self.ui, repo, head, n)
        patchf.close()
        return (0, n)
    def qparents(self, repo, rev=None):
        """Return the parent node relevant to the patch queue.

        With rev=None: the working directory's first parent, or — if the
        dirstate has two parents — the node of the topmost applied
        patch (None when nothing is applied).

        With a rev: that revision's first parent, except for merges,
        where whichever parent is an applied patch is returned (None if
        neither is).
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == revlog.nullid:
                return p1
            if len(self.applied) == 0:
                return None
            # dirstate is a merge: trust the applied stack instead
            (top, patch) = self.applied[-1].split(':')
            top = revlog.bin(top)
            return top
        pp = repo.changelog.parents(rev)
        if pp[1] != revlog.nullid:
            # merge revision: pick the parent that is in the queue
            arevs = [ x.split(':')[0] for x in self.applied ]
            p0 = revlog.hex(pp[0])
            p1 = revlog.hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
            return None
        return pp[0]
    def mergepatch(self, repo, mergeq, series, wlock):
        """Merge each patch in *series* from queue *mergeq* into this one.

        Returns (err, head) where head is the last resulting node.
        Stops and returns an error tuple at the first patch that is
        missing or unapplied in *mergeq*, or that fails to merge.
        """
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents.  This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent.  This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
                            wlock=wlock)
            self.applied.append(revlog.hex(n) + ":" + pname)
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch)
            if not patch:
                # note: patch is falsy here, so this prints the lookup result
                self.ui.warn("patch %s does not exist\n" % patch)
                return (1, None)

            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn("patch %s is not applied\n" % patch)
                return (1, None)
            rev = revlog.bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
            if head:
                self.applied.append(revlog.hex(head) + ":" + patch)
                self.applied_dirty = 1
            if err:
                return (err, head)
        return (0, head)
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, wlock=None):
        """Apply each patch in *series* with GNU patch and commit it.

        list          -- append "imported patch <name>" to each message
        update_status -- record each commit in self.applied
        strict        -- abort as soon as any hunk applies with fuzz
        patchdir      -- directory holding the patch files (default self.path)
        merge         -- if set, mark patched files merged and make this
                         node the dirstate's second parent
        wlock         -- reuse an existing working-dir lock

        Returns (err, node) where node is the last commit created.
        Changes cwd to repo.root for the duration of the call.
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        pwd = os.getcwd()
        os.chdir(repo.root)
        err = 0
        if not wlock:
            wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction()
        n = None
        for patch in series:
            self.ui.warn("applying %s\n" % patch)
            pf = os.path.join(patchdir, patch)

            try:
                message, comments, user, patchfound = self.readheaders(patch)
            except:
                self.ui.warn("Unable to read %s\n" % pf)
                err = 1
                break

            if not message:
                message = "imported patch %s\n" % patch
            else:
                if list:
                    message.append("\nimported patch %s" % patch)
                message = '\n'.join(message)

            # shell out to GNU patch; its stdout is parsed below for
            # file names, fuzz and failure notices
            try:
                f = os.popen("patch -p1 --no-backup-if-mismatch < '%s'" % (pf))
            except:
                self.ui.warn("patch failed, unable to continue (try -v)\n")
                err = 1
                break
            files = []
            fuzz = False
            for l in f:
                l = l.rstrip('\r\n');
                if self.ui.verbose:
                    self.ui.warn(l + "\n")
                if l[:14] == 'patching file ':
                    pf = os.path.normpath(l[14:])
                    # when patch finds a space in the file name, it puts
                    # single quotes around the filename.  strip them off
                    if pf[0] == "'" and pf[-1] == "'":
                        pf = pf[1:-1]
                    if pf not in files:
                        files.append(pf)
                    printed_file = False
                    file_str = l
                elif l.find('with fuzz') >= 0:
                    if not printed_file:
                        self.ui.warn(file_str + '\n')
                        printed_file = True
                    self.ui.warn(l + '\n')
                    fuzz = True
                elif l.find('saving rejects to file') >= 0:
                    self.ui.warn(l + '\n')
                elif l.find('FAILED') >= 0:
                    if not printed_file:
                        self.ui.warn(file_str + '\n')
                        printed_file = True
                    self.ui.warn(l + '\n')
            # popen close() gives the child's exit status (None on success)
            patcherr = f.close()

            if merge and len(files) > 0:
                # Mark as merged and update dirstate parent info
                repo.dirstate.update(repo.dirstate.filterfiles(files), 'm')
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)
            if len(files) > 0:
                commands.addremove_lock(self.ui, repo, files,
                                        opts={}, wlock=wlock)
            n = repo.commit(files, message, user, force=1, lock=lock,
                            wlock=wlock)

            if n == None:
                self.ui.warn("repo commit failed\n")
                sys.exit(1)

            if update_status:
                self.applied.append(revlog.hex(n) + ":" + patch)

            if patcherr:
                # an empty patch is tolerated (err stays 0) but still
                # stops the series; real rejects are a hard failure
                if not patchfound:
                    self.ui.warn("patch %s is empty\n" % patch)
                    err = 0
                else:
                    self.ui.warn("patch failed, rejects left in working dir\n")
                    err = 1
                break

            if fuzz and strict:
                self.ui.warn("fuzz found when applying patch, stopping\n")
                err = 1
                break
        tr.close()
        os.chdir(pwd)
        return (err, n)
360 | def delete(self, repo, patch): |
|
357 | def delete(self, repo, patch): | |
361 | patch = self.lookup(patch) |
|
358 | patch = self.lookup(patch) | |
362 | info = self.isapplied(patch) |
|
359 | info = self.isapplied(patch) | |
363 | if info: |
|
360 | if info: | |
364 | self.ui.warn("cannot delete applied patch %s\n" % patch) |
|
361 | self.ui.warn("cannot delete applied patch %s\n" % patch) | |
365 | sys.exit(1) |
|
362 | sys.exit(1) | |
366 | if patch not in self.series: |
|
363 | if patch not in self.series: | |
367 | self.ui.warn("patch %s not in series file\n" % patch) |
|
364 | self.ui.warn("patch %s not in series file\n" % patch) | |
368 | sys.exit(1) |
|
365 | sys.exit(1) | |
369 | i = self.find_series(patch) |
|
366 | i = self.find_series(patch) | |
370 | del self.full_series[i] |
|
367 | del self.full_series[i] | |
371 | self.read_series(self.full_series) |
|
368 | self.read_series(self.full_series) | |
372 | self.series_dirty = 1 |
|
369 | self.series_dirty = 1 | |
373 |
|
370 | |||
374 | def check_toppatch(self, repo): |
|
371 | def check_toppatch(self, repo): | |
375 | if len(self.applied) > 0: |
|
372 | if len(self.applied) > 0: | |
376 | (top, patch) = self.applied[-1].split(':') |
|
373 | (top, patch) = self.applied[-1].split(':') | |
377 | top = revlog.bin(top) |
|
374 | top = revlog.bin(top) | |
378 | pp = repo.dirstate.parents() |
|
375 | pp = repo.dirstate.parents() | |
379 | if top not in pp: |
|
376 | if top not in pp: | |
380 | self.ui.warn("queue top not at dirstate parents. top %s dirstate %s %s\n" %( revlog.short(top), revlog.short(pp[0]), revlog.short(pp[1]))) |
|
377 | self.ui.warn("queue top not at dirstate parents. top %s dirstate %s %s\n" %( revlog.short(top), revlog.short(pp[0]), revlog.short(pp[1]))) | |
381 | sys.exit(1) |
|
378 | sys.exit(1) | |
382 | return top |
|
379 | return top | |
383 | return None |
|
380 | return None | |
384 | def check_localchanges(self, repo): |
|
381 | def check_localchanges(self, repo): | |
385 | (c, a, r, d, u) = repo.changes(None, None) |
|
382 | (c, a, r, d, u) = repo.changes(None, None) | |
386 | if c or a or d or r: |
|
383 | if c or a or d or r: | |
387 | self.ui.write("Local changes found, refresh first\n") |
|
384 | self.ui.write("Local changes found, refresh first\n") | |
388 | sys.exit(1) |
|
385 | sys.exit(1) | |
    def new(self, repo, patch, msg=None, force=None):
        """Create a new (empty) patch named *patch* on top of the queue.

        msg   -- optional commit message; also written into the patch file
        force -- skip the local-changes check

        Commits an empty changeset to anchor the patch, inserts the
        patch name into the series at the current series end, and adds
        the file to the patch-queue repository if one exists.
        """
        if not force:
            self.check_localchanges(repo)
        self.check_toppatch(repo)
        wlock = repo.wlock()
        insert = self.series_end()
        if msg:
            n = repo.commit([], "[mq]: %s" % msg, force=True, wlock=wlock)
        else:
            n = repo.commit([],
                            "New patch: %s" % patch, force=True, wlock=wlock)
        if n == None:
            self.ui.warn("repo commit failed\n")
            sys.exit(1)
        self.full_series[insert:insert] = [patch]
        self.applied.append(revlog.hex(n) + ":" + patch)
        self.read_series(self.full_series)
        self.series_dirty = 1
        self.applied_dirty = 1
        # create the patch file; it contains only the message (if any)
        p = self.opener(os.path.join(self.path, patch), "w")
        if msg:
            msg = msg + "\n"
            p.write(msg)
        p.close()
        wlock = None   # release our reference to the working-dir lock
        r = self.qrepo()
        if r: r.add([patch])
416 | def strip(self, repo, rev, update=True, backup="all", wlock=None): |
|
414 | def strip(self, repo, rev, update=True, backup="all", wlock=None): | |
417 | def limitheads(chlog, stop): |
|
415 | def limitheads(chlog, stop): | |
418 | """return the list of all nodes that have no children""" |
|
416 | """return the list of all nodes that have no children""" | |
419 | p = {} |
|
417 | p = {} | |
420 | h = [] |
|
418 | h = [] | |
421 | stoprev = 0 |
|
419 | stoprev = 0 | |
422 | if stop in chlog.nodemap: |
|
420 | if stop in chlog.nodemap: | |
423 | stoprev = chlog.rev(stop) |
|
421 | stoprev = chlog.rev(stop) | |
424 |
|
422 | |||
425 | for r in range(chlog.count() - 1, -1, -1): |
|
423 | for r in range(chlog.count() - 1, -1, -1): | |
426 | n = chlog.node(r) |
|
424 | n = chlog.node(r) | |
427 | if n not in p: |
|
425 | if n not in p: | |
428 | h.append(n) |
|
426 | h.append(n) | |
429 | if n == stop: |
|
427 | if n == stop: | |
430 | break |
|
428 | break | |
431 | if r < stoprev: |
|
429 | if r < stoprev: | |
432 | break |
|
430 | break | |
433 | for pn in chlog.parents(n): |
|
431 | for pn in chlog.parents(n): | |
434 | p[pn] = 1 |
|
432 | p[pn] = 1 | |
435 | return h |
|
433 | return h | |
436 |
|
434 | |||
437 | def bundle(cg): |
|
435 | def bundle(cg): | |
438 | backupdir = repo.join("strip-backup") |
|
436 | backupdir = repo.join("strip-backup") | |
439 | if not os.path.isdir(backupdir): |
|
437 | if not os.path.isdir(backupdir): | |
440 | os.mkdir(backupdir) |
|
438 | os.mkdir(backupdir) | |
441 | name = os.path.join(backupdir, "%s" % revlog.short(rev)) |
|
439 | name = os.path.join(backupdir, "%s" % revlog.short(rev)) | |
442 | name = savename(name) |
|
440 | name = savename(name) | |
443 | self.ui.warn("saving bundle to %s\n" % name) |
|
441 | self.ui.warn("saving bundle to %s\n" % name) | |
444 | # TODO, exclusive open |
|
442 | # TODO, exclusive open | |
445 | f = open(name, "wb") |
|
443 | f = open(name, "wb") | |
446 | try: |
|
444 | try: | |
447 | f.write("HG10") |
|
445 | f.write("HG10") | |
448 | z = bz2.BZ2Compressor(9) |
|
446 | z = bz2.BZ2Compressor(9) | |
449 | while 1: |
|
447 | while 1: | |
450 | chunk = cg.read(4096) |
|
448 | chunk = cg.read(4096) | |
451 | if not chunk: |
|
449 | if not chunk: | |
452 | break |
|
450 | break | |
453 | f.write(z.compress(chunk)) |
|
451 | f.write(z.compress(chunk)) | |
454 | f.write(z.flush()) |
|
452 | f.write(z.flush()) | |
455 | except: |
|
453 | except: | |
456 | os.unlink(name) |
|
454 | os.unlink(name) | |
457 | raise |
|
455 | raise | |
458 | f.close() |
|
456 | f.close() | |
459 | return name |
|
457 | return name | |
460 |
|
458 | |||
461 | def stripall(rev, revnum): |
|
459 | def stripall(rev, revnum): | |
462 | cl = repo.changelog |
|
460 | cl = repo.changelog | |
463 | c = cl.read(rev) |
|
461 | c = cl.read(rev) | |
464 | mm = repo.manifest.read(c[0]) |
|
462 | mm = repo.manifest.read(c[0]) | |
465 | seen = {} |
|
463 | seen = {} | |
466 |
|
464 | |||
467 | for x in xrange(revnum, cl.count()): |
|
465 | for x in xrange(revnum, cl.count()): | |
468 | c = cl.read(cl.node(x)) |
|
466 | c = cl.read(cl.node(x)) | |
469 | for f in c[3]: |
|
467 | for f in c[3]: | |
470 | if f in seen: |
|
468 | if f in seen: | |
471 | continue |
|
469 | continue | |
472 | seen[f] = 1 |
|
470 | seen[f] = 1 | |
473 | if f in mm: |
|
471 | if f in mm: | |
474 | filerev = mm[f] |
|
472 | filerev = mm[f] | |
475 | else: |
|
473 | else: | |
476 | filerev = 0 |
|
474 | filerev = 0 | |
477 | seen[f] = filerev |
|
475 | seen[f] = filerev | |
478 | # we go in two steps here so the strip loop happens in a |
|
476 | # we go in two steps here so the strip loop happens in a | |
479 | # sensible order. When stripping many files, this helps keep |
|
477 | # sensible order. When stripping many files, this helps keep | |
480 | # our disk access patterns under control. |
|
478 | # our disk access patterns under control. | |
481 | list = seen.keys() |
|
479 | list = seen.keys() | |
482 | list.sort() |
|
480 | list.sort() | |
483 | for f in list: |
|
481 | for f in list: | |
484 | ff = repo.file(f) |
|
482 | ff = repo.file(f) | |
485 | filerev = seen[f] |
|
483 | filerev = seen[f] | |
486 | if filerev != 0: |
|
484 | if filerev != 0: | |
487 | if filerev in ff.nodemap: |
|
485 | if filerev in ff.nodemap: | |
488 | filerev = ff.rev(filerev) |
|
486 | filerev = ff.rev(filerev) | |
489 | else: |
|
487 | else: | |
490 | filerev = 0 |
|
488 | filerev = 0 | |
491 | ff.strip(filerev, revnum) |
|
489 | ff.strip(filerev, revnum) | |
492 |
|
490 | |||
493 | if not wlock: |
|
491 | if not wlock: | |
494 | wlock = repo.wlock() |
|
492 | wlock = repo.wlock() | |
495 | lock = repo.lock() |
|
493 | lock = repo.lock() | |
496 | chlog = repo.changelog |
|
494 | chlog = repo.changelog | |
497 | # TODO delete the undo files, and handle undo of merge sets |
|
495 | # TODO delete the undo files, and handle undo of merge sets | |
498 | pp = chlog.parents(rev) |
|
496 | pp = chlog.parents(rev) | |
499 | revnum = chlog.rev(rev) |
|
497 | revnum = chlog.rev(rev) | |
500 |
|
498 | |||
501 | if update: |
|
499 | if update: | |
502 | urev = self.qparents(repo, rev) |
|
500 | urev = self.qparents(repo, rev) | |
503 | repo.update(urev, allow=False, force=True, wlock=wlock) |
|
501 | repo.update(urev, allow=False, force=True, wlock=wlock) | |
504 | repo.dirstate.write() |
|
502 | repo.dirstate.write() | |
505 |
|
503 | |||
506 | # save is a list of all the branches we are truncating away |
|
504 | # save is a list of all the branches we are truncating away | |
507 | # that we actually want to keep. changegroup will be used |
|
505 | # that we actually want to keep. changegroup will be used | |
508 | # to preserve them and add them back after the truncate |
|
506 | # to preserve them and add them back after the truncate | |
509 | saveheads = [] |
|
507 | saveheads = [] | |
510 | savebases = {} |
|
508 | savebases = {} | |
511 |
|
509 | |||
512 | tip = chlog.tip() |
|
510 | tip = chlog.tip() | |
513 | heads = limitheads(chlog, rev) |
|
511 | heads = limitheads(chlog, rev) | |
514 | seen = {} |
|
512 | seen = {} | |
515 |
|
513 | |||
516 | # search through all the heads, finding those where the revision |
|
514 | # search through all the heads, finding those where the revision | |
517 | # we want to strip away is an ancestor. Also look for merges |
|
515 | # we want to strip away is an ancestor. Also look for merges | |
518 | # that might be turned into new heads by the strip. |
|
516 | # that might be turned into new heads by the strip. | |
519 | while heads: |
|
517 | while heads: | |
520 | h = heads.pop() |
|
518 | h = heads.pop() | |
521 | n = h |
|
519 | n = h | |
522 | while True: |
|
520 | while True: | |
523 | seen[n] = 1 |
|
521 | seen[n] = 1 | |
524 | pp = chlog.parents(n) |
|
522 | pp = chlog.parents(n) | |
525 | if pp[1] != revlog.nullid and chlog.rev(pp[1]) > revnum: |
|
523 | if pp[1] != revlog.nullid and chlog.rev(pp[1]) > revnum: | |
526 | if pp[1] not in seen: |
|
524 | if pp[1] not in seen: | |
527 | heads.append(pp[1]) |
|
525 | heads.append(pp[1]) | |
528 | if pp[0] == revlog.nullid: |
|
526 | if pp[0] == revlog.nullid: | |
529 | break |
|
527 | break | |
530 | if chlog.rev(pp[0]) < revnum: |
|
528 | if chlog.rev(pp[0]) < revnum: | |
531 | break |
|
529 | break | |
532 | n = pp[0] |
|
530 | n = pp[0] | |
533 | if n == rev: |
|
531 | if n == rev: | |
534 | break |
|
532 | break | |
535 | r = chlog.reachable(h, rev) |
|
533 | r = chlog.reachable(h, rev) | |
536 | if rev not in r: |
|
534 | if rev not in r: | |
537 | saveheads.append(h) |
|
535 | saveheads.append(h) | |
538 | for x in r: |
|
536 | for x in r: | |
539 | if chlog.rev(x) > revnum: |
|
537 | if chlog.rev(x) > revnum: | |
540 | savebases[x] = 1 |
|
538 | savebases[x] = 1 | |
541 |
|
539 | |||
542 | # create a changegroup for all the branches we need to keep |
|
540 | # create a changegroup for all the branches we need to keep | |
543 | if backup is "all": |
|
541 | if backup is "all": | |
544 | backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip') |
|
542 | backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip') | |
545 | bundle(backupch) |
|
543 | bundle(backupch) | |
546 | if saveheads: |
|
544 | if saveheads: | |
547 | backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip') |
|
545 | backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip') | |
548 | chgrpfile = bundle(backupch) |
|
546 | chgrpfile = bundle(backupch) | |
549 |
|
547 | |||
550 | stripall(rev, revnum) |
|
548 | stripall(rev, revnum) | |
551 |
|
549 | |||
552 | change = chlog.read(rev) |
|
550 | change = chlog.read(rev) | |
553 | repo.manifest.strip(repo.manifest.rev(change[0]), revnum) |
|
551 | repo.manifest.strip(repo.manifest.rev(change[0]), revnum) | |
554 | chlog.strip(revnum, revnum) |
|
552 | chlog.strip(revnum, revnum) | |
555 | if saveheads: |
|
553 | if saveheads: | |
556 | self.ui.status("adding branch\n") |
|
554 | self.ui.status("adding branch\n") | |
557 | commands.unbundle(self.ui, repo, chgrpfile, update=False) |
|
555 | commands.unbundle(self.ui, repo, chgrpfile, update=False) | |
558 | if backup is not "strip": |
|
556 | if backup is not "strip": | |
559 | os.unlink(chgrpfile) |
|
557 | os.unlink(chgrpfile) | |
560 |
|
558 | |||
561 | def isapplied(self, patch): |
|
559 | def isapplied(self, patch): | |
562 | """returns (index, rev, patch)""" |
|
560 | """returns (index, rev, patch)""" | |
563 | for i in xrange(len(self.applied)): |
|
561 | for i in xrange(len(self.applied)): | |
564 | p = self.applied[i] |
|
562 | p = self.applied[i] | |
565 | a = p.split(':') |
|
563 | a = p.split(':') | |
566 | if a[1] == patch: |
|
564 | if a[1] == patch: | |
567 | return (i, a[0], a[1]) |
|
565 | return (i, a[0], a[1]) | |
568 | return None |
|
566 | return None | |
569 |
|
567 | |||
570 | def lookup(self, patch): |
|
568 | def lookup(self, patch): | |
571 | if patch == None: |
|
569 | if patch == None: | |
572 | return None |
|
570 | return None | |
573 | if patch in self.series: |
|
571 | if patch in self.series: | |
574 | return patch |
|
572 | return patch | |
575 | if not os.path.isfile(os.path.join(self.path, patch)): |
|
573 | if not os.path.isfile(os.path.join(self.path, patch)): | |
576 | try: |
|
574 | try: | |
577 | sno = int(patch) |
|
575 | sno = int(patch) | |
578 | except(ValueError, OverflowError): |
|
576 | except(ValueError, OverflowError): | |
579 | self.ui.warn("patch %s not in series\n" % patch) |
|
577 | self.ui.warn("patch %s not in series\n" % patch) | |
580 | sys.exit(1) |
|
578 | sys.exit(1) | |
581 | if sno >= len(self.series): |
|
579 | if sno >= len(self.series): | |
582 | self.ui.warn("patch number %d is out of range\n" % sno) |
|
580 | self.ui.warn("patch number %d is out of range\n" % sno) | |
583 | sys.exit(1) |
|
581 | sys.exit(1) | |
584 | patch = self.series[sno] |
|
582 | patch = self.series[sno] | |
585 | else: |
|
583 | else: | |
586 | self.ui.warn("patch %s not in series\n" % patch) |
|
584 | self.ui.warn("patch %s not in series\n" % patch) | |
587 | sys.exit(1) |
|
585 | sys.exit(1) | |
588 | return patch |
|
586 | return patch | |
589 |
|
587 | |||
590 |
def push(self, repo, patch=None, force=False, list=False, |
|
588 | def push(self, repo, patch=None, force=False, list=False, | |
591 | mergeq=None, wlock=None): |
|
589 | mergeq=None, wlock=None): | |
592 | if not wlock: |
|
590 | if not wlock: | |
593 | wlock = repo.wlock() |
|
591 | wlock = repo.wlock() | |
594 |
patch = self.lookup(patch) |
|
592 | patch = self.lookup(patch) | |
595 | if patch and self.isapplied(patch): |
|
593 | if patch and self.isapplied(patch): | |
596 | self.ui.warn("patch %s is already applied\n" % patch) |
|
594 | self.ui.warn("patch %s is already applied\n" % patch) | |
597 | sys.exit(1) |
|
595 | sys.exit(1) | |
598 | if self.series_end() == len(self.series): |
|
596 | if self.series_end() == len(self.series): | |
599 | self.ui.warn("File series fully applied\n") |
|
597 | self.ui.warn("File series fully applied\n") | |
600 | sys.exit(1) |
|
598 | sys.exit(1) | |
601 | if not force: |
|
599 | if not force: | |
602 | self.check_localchanges(repo) |
|
600 | self.check_localchanges(repo) | |
603 |
|
601 | |||
604 | self.applied_dirty = 1; |
|
602 | self.applied_dirty = 1; | |
605 | start = self.series_end() |
|
603 | start = self.series_end() | |
606 | if start > 0: |
|
604 | if start > 0: | |
607 | self.check_toppatch(repo) |
|
605 | self.check_toppatch(repo) | |
608 | if not patch: |
|
606 | if not patch: | |
609 | patch = self.series[start] |
|
607 | patch = self.series[start] | |
610 | end = start + 1 |
|
608 | end = start + 1 | |
611 | else: |
|
609 | else: | |
612 | end = self.series.index(patch, start) + 1 |
|
610 | end = self.series.index(patch, start) + 1 | |
613 | s = self.series[start:end] |
|
611 | s = self.series[start:end] | |
614 | if mergeq: |
|
612 | if mergeq: | |
615 | ret = self.mergepatch(repo, mergeq, s, wlock) |
|
613 | ret = self.mergepatch(repo, mergeq, s, wlock) | |
616 | else: |
|
614 | else: | |
617 | ret = self.apply(repo, s, list, wlock=wlock) |
|
615 | ret = self.apply(repo, s, list, wlock=wlock) | |
618 | top = self.applied[-1].split(':')[1] |
|
616 | top = self.applied[-1].split(':')[1] | |
619 | if ret[0]: |
|
617 | if ret[0]: | |
620 |
self.ui.write("Errors during apply, please fix and refresh %s\n" % |
|
618 | self.ui.write("Errors during apply, please fix and refresh %s\n" % | |
621 | top) |
|
619 | top) | |
622 | else: |
|
620 | else: | |
623 | self.ui.write("Now at: %s\n" % top) |
|
621 | self.ui.write("Now at: %s\n" % top) | |
624 | return ret[0] |
|
622 | return ret[0] | |
625 |
|
623 | |||
    def pop(self, repo, patch=None, force=False, update=True, wlock=None):
        """Remove patches from the applied stack, down to and including
        `patch`, or just the topmost patch when no name is given.

        With update=True (the default) the working directory is reverted
        to the parent of the new top patch before the popped changesets
        are stripped from the repository.  User errors warn and exit.
        """
        def getfile(f, rev):
            # Materialize file `f` at revision `rev` into the working
            # directory, creating missing parent directories on demand.
            t = repo.file(f).read(rev)
            try:
                repo.wfile(f, "w").write(t)
            except IOError:
                os.makedirs(os.path.dirname(repo.wjoin(f)))
                repo.wfile(f, "w").write(t)

        if not wlock:
            wlock = repo.wlock()
        if patch:
            # info is (index, rev, patch); try the name as given first,
            # then fall back to lookup() (series name or numeric index).
            info = self.isapplied(patch)
            if not info:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
            if not info:
                self.ui.warn("patch %s is not applied\n" % patch)
                sys.exit(1)
        if len(self.applied) == 0:
            self.ui.warn("No patches applied\n")
            sys.exit(1)

        if not update:
            # If a dirstate parent is one of the revisions being popped,
            # the dirstate would be left dangling: force an update.
            parents = repo.dirstate.parents()
            rr = [ revlog.bin(x.split(':')[0]) for x in self.applied ]
            for p in parents:
                if p in rr:
                    self.ui.warn("qpop: forcing dirstate update\n")
                    update = True

        if not force and update:
            self.check_localchanges(repo)

        self.applied_dirty = 1;
        end = len(self.applied)
        if not patch:
            # No explicit target: pop only the topmost applied patch.
            info = [len(self.applied) - 1] + self.applied[-1].split(':')
        start = info[0]
        rev = revlog.bin(info[1])

        # we know there are no local changes, so we can make a simplified
        # form of hg.update.
        if update:
            top = self.check_toppatch(repo)
            # qp is the revision the working dir should end up at.
            qp = self.qparents(repo, rev)
            changes = repo.changelog.read(qp)
            mf1 = repo.manifest.readflags(changes[0])
            mmap = repo.manifest.read(changes[0])
            (c, a, r, d, u) = repo.changes(qp, top)
            if d:
                raise util.Abort("deletions found between repo revs")
            # Restore changed and removed files to their qp contents.
            for f in c:
                getfile(f, mmap[f])
            for f in r:
                getfile(f, mmap[f])
                util.set_exec(repo.wjoin(f), mf1[f])
            repo.dirstate.update(c + r, 'n')
            # Files added by the popped patches disappear from the
            # working directory; prune now-empty directories best-effort.
            for f in a:
                try: os.unlink(repo.wjoin(f))
                except: raise
                try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                except: pass
            if a:
                repo.dirstate.forget(a)
            repo.dirstate.setparents(qp, revlog.nullid)
        # Strip the popped changesets and trim the applied stack.
        self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
        del self.applied[start:end]
        if len(self.applied):
            self.ui.write("Now at: %s\n" % self.applied[-1].split(':')[1])
        else:
            self.ui.write("Patch queue now empty\n")
700 | def diff(self, repo, files): |
|
698 | def diff(self, repo, files): | |
701 | top = self.check_toppatch(repo) |
|
699 | top = self.check_toppatch(repo) | |
702 | if not top: |
|
700 | if not top: | |
703 | self.ui.write("No patches applied\n") |
|
701 | self.ui.write("No patches applied\n") | |
704 | return |
|
702 | return | |
705 | qp = self.qparents(repo, top) |
|
703 | qp = self.qparents(repo, top) | |
706 | commands.dodiff(sys.stdout, self.ui, repo, qp, None, files) |
|
704 | commands.dodiff(sys.stdout, self.ui, repo, qp, None, files) | |
707 |
|
705 | |||
    def refresh(self, repo, short=False):
        """Regenerate the top patch from the current working directory.

        Rewrites the patch file, strips the old top changeset and
        re-commits it with the local modifications folded in.  With
        short=True only files already touched by the patch are diffed.
        When the top patch is not the repository tip, falls back to a
        plain pop/push cycle.
        """
        if len(self.applied) == 0:
            self.ui.write("No patches applied\n")
            return
        wlock = repo.wlock()
        self.check_toppatch(repo)
        qp = self.qparents(repo)
        # Applied entries are "rev:patchname".
        (top, patch) = self.applied[-1].split(':')
        top = revlog.bin(top)
        cparents = repo.changelog.parents(top)
        patchparent = self.qparents(repo, top)
        message, comments, user, patchfound = self.readheaders(patch)

        patchf = self.opener(os.path.join(self.path, patch), "w")
        if comments:
            comments = "\n".join(comments) + '\n\n'
            patchf.write(comments)

        tip = repo.changelog.tip()
        if top == tip:
            # if the top of our patch queue is also the tip, there is an
            # optimization here.  We update the dirstate in place and strip
            # off the tip commit.  Then just commit the current directory
            # tree.  We can also send repo.commit the list of files
            # changed to speed up the diff
            #
            # in short mode, we only diff the files included in the
            # patch already
            #
            # this should really read:
            #(cc, dd, aa, aa2, uu) = repo.changes(tip, patchparent)
            # but we do it backwards to take advantage of manifest/chlog
            # caching against the next repo.changes call
            #
            (cc, aa, dd, aa2, uu) = repo.changes(patchparent, tip)
            if short:
                filelist = cc + aa + dd
            else:
                filelist = None
            (c, a, r, d, u) = repo.changes(None, None, filelist)

            # we might end up with files that were added between tip and
            # the dirstate parent, but then changed in the local dirstate.
            # in this case, we want them to only show up in the added section
            for x in c:
                if x not in aa:
                    cc.append(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch.  In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    del dd[dd.index(x)]
                    cc.append(x)
                else:
                    aa.append(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    del aa[aa.index(x)]
                    forget.append(x)
                    continue
                elif x in cc:
                    del cc[cc.index(x)]
                dd.append(x)

            c = list(util.unique(cc))
            r = list(util.unique(dd))
            a = list(util.unique(aa))
            filelist = list(util.unique(c + r + a ))
            commands.dodiff(patchf, self.ui, repo, patchparent, None,
                            filelist, changes=(c, a, r, [], u))
            patchf.close()

            # Re-point the dirstate at the patch parent and re-stage the
            # reconciled add/remove/change sets before recommitting.
            changes = repo.changelog.read(tip)
            repo.dirstate.setparents(*cparents)
            repo.dirstate.update(a, 'a')
            repo.dirstate.update(r, 'r')
            repo.dirstate.update(c, 'n')
            repo.dirstate.forget(forget)

            if not message:
                message = "patch queue: %s\n" % patch
            else:
                message = "\n".join(message)
            # Strip the old top changeset, then commit the folded result
            # and record its new node in the applied stack.
            self.strip(repo, top, update=False, backup='strip', wlock=wlock)
            n = repo.commit(filelist, message, changes[1], force=1, wlock=wlock)
            self.applied[-1] = revlog.hex(n) + ':' + patch
            self.applied_dirty = 1
        else:
            # Top patch is not the tip: no in-place shortcut available.
            commands.dodiff(patchf, self.ui, repo, patchparent, None)
            patchf.close()
            self.pop(repo, force=True, wlock=wlock)
            self.push(repo, force=True, wlock=wlock)
805 | def init(self, repo, create=False): |
|
803 | def init(self, repo, create=False): | |
806 | if os.path.isdir(self.path): |
|
804 | if os.path.isdir(self.path): | |
807 | raise util.Abort("patch queue directory already exists") |
|
805 | raise util.Abort("patch queue directory already exists") | |
808 | os.mkdir(self.path) |
|
806 | os.mkdir(self.path) | |
809 | if create: |
|
807 | if create: | |
810 | return self.qrepo(create=True) |
|
808 | return self.qrepo(create=True) | |
811 |
|
809 | |||
812 | def unapplied(self, repo, patch=None): |
|
810 | def unapplied(self, repo, patch=None): | |
813 | if patch and patch not in self.series: |
|
811 | if patch and patch not in self.series: | |
814 | self.ui.warn("%s not in the series file\n" % patch) |
|
812 | self.ui.warn("%s not in the series file\n" % patch) | |
815 | sys.exit(1) |
|
813 | sys.exit(1) | |
816 | if not patch: |
|
814 | if not patch: | |
817 | start = self.series_end() |
|
815 | start = self.series_end() | |
818 | else: |
|
816 | else: | |
819 | start = self.series.index(patch) + 1 |
|
817 | start = self.series.index(patch) + 1 | |
820 | for p in self.series[start:]: |
|
818 | for p in self.series[start:]: | |
821 | self.ui.write("%s\n" % p) |
|
819 | self.ui.write("%s\n" % p) | |
822 |
|
820 | |||
823 | def qseries(self, repo, missing=None): |
|
821 | def qseries(self, repo, missing=None): | |
824 | start = self.series_end() |
|
822 | start = self.series_end() | |
825 | if not missing: |
|
823 | if not missing: | |
826 | for p in self.series[:start]: |
|
824 | for p in self.series[:start]: | |
827 | if self.ui.verbose: |
|
825 | if self.ui.verbose: | |
828 | self.ui.write("%d A " % self.series.index(p)) |
|
826 | self.ui.write("%d A " % self.series.index(p)) | |
829 | self.ui.write("%s\n" % p) |
|
827 | self.ui.write("%s\n" % p) | |
830 | for p in self.series[start:]: |
|
828 | for p in self.series[start:]: | |
831 | if self.ui.verbose: |
|
829 | if self.ui.verbose: | |
832 | self.ui.write("%d U " % self.series.index(p)) |
|
830 | self.ui.write("%d U " % self.series.index(p)) | |
833 | self.ui.write("%s\n" % p) |
|
831 | self.ui.write("%s\n" % p) | |
834 | else: |
|
832 | else: | |
835 | list = [] |
|
833 | list = [] | |
836 | for root, dirs, files in os.walk(self.path): |
|
834 | for root, dirs, files in os.walk(self.path): | |
837 | d = root[len(self.path) + 1:] |
|
835 | d = root[len(self.path) + 1:] | |
838 | for f in files: |
|
836 | for f in files: | |
839 | fl = os.path.join(d, f) |
|
837 | fl = os.path.join(d, f) | |
840 | if (fl not in self.series and fl != "status" and |
|
838 | if (fl not in self.series and fl != "status" and | |
841 | fl != "series" and not fl.startswith('.')): |
|
839 | fl != "series" and not fl.startswith('.')): | |
842 | list.append(fl) |
|
840 | list.append(fl) | |
843 | list.sort() |
|
841 | list.sort() | |
844 | if list: |
|
842 | if list: | |
845 |
for x in list: |
|
843 | for x in list: | |
846 | if self.ui.verbose: |
|
844 | if self.ui.verbose: | |
847 | self.ui.write("D ") |
|
845 | self.ui.write("D ") | |
848 | self.ui.write("%s\n" % x) |
|
846 | self.ui.write("%s\n" % x) | |
849 |
|
847 | |||
850 | def issaveline(self, l): |
|
848 | def issaveline(self, l): | |
851 | name = l.split(':')[1] |
|
849 | name = l.split(':')[1] | |
852 | if name == '.hg.patches.save.line': |
|
850 | if name == '.hg.patches.save.line': | |
853 | return True |
|
851 | return True | |
854 |
|
852 | |||
855 | def qrepo(self, create=False): |
|
853 | def qrepo(self, create=False): | |
856 | if create or os.path.isdir(os.path.join(self.path, ".hg")): |
|
854 | if create or os.path.isdir(os.path.join(self.path, ".hg")): | |
857 | return hg.repository(ui=self.ui, path=self.path, create=create) |
|
855 | return hg.repository(ui=self.ui, path=self.path, create=create) | |
858 |
|
856 | |||
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Rebuild queue state from a save commit created by save().

        Parses the commit description of `rev` for the 'Patch Data:'
        section (applied entries and series lines) and an optional
        'Dirstate:' line recording the queue repository's parents.
        With delete=True the save changeset itself is stripped; with
        qupdate=True the queue repository is updated to the recorded
        parent.  Returns 1 on failure.
        """
        c = repo.changelog.read(rev)
        desc = c[4].strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i in xrange(0, len(lines)):
            if lines[i] == 'Patch Data:':
                datastart = i + 1
            elif lines[i].startswith('Dirstate:'):
                # 'Dirstate: <hex> <hex>' -> queue repo parent nodes.
                l = lines[i].rstrip()
                l = l[10:].split(' ')
                qpp = [ hg.bin(x) for x in l ]
            elif datastart != None:
                # Data lines are 'rev:file'; a non-empty rev marks an
                # applied patch, every line contributes to the series.
                l = lines[i].rstrip()
                index = l.index(':')
                id = l[:index]
                file = l[index + 1:]
                if id:
                    applied.append(l)
                series.append(file)
        if datastart == None:
            self.ui.warn("No saved patch data found\n")
            return 1
        self.ui.warn("restoring status: %s\n" % lines[0])
        self.full_series = series
        self.applied = applied
        self.read_series(self.full_series)
        self.series_dirty = 1
        self.applied_dirty = 1
        heads = repo.changelog.heads()
        if delete:
            # Only strip the save changeset when nothing was committed
            # on top of it.
            if rev not in heads:
                self.ui.warn("save entry has children, leaving it alone\n")
            else:
                self.ui.warn("removing save entry %s\n" % hg.short(rev))
                pp = repo.dirstate.parents()
                # Update the working dir only if it sits on the save rev.
                if rev in pp:
                    update = True
                else:
                    update = False
                self.strip(repo, rev, update=update, backup='strip')
        if qpp:
            self.ui.warn("saved queue repository parents: %s %s\n" %
                         (hg.short(qpp[0]), hg.short(qpp[1])))
            if qupdate:
                print "queue directory updating"
                r = self.qrepo()
                if not r:
                    self.ui.warn("Unable to load queue repository\n")
                    return 1
                r.update(qpp[0], allow=False, force=True)
915 | def save(self, repo, msg=None): |
|
913 | def save(self, repo, msg=None): | |
916 | if len(self.applied) == 0: |
|
914 | if len(self.applied) == 0: | |
917 | self.ui.warn("save: no patches applied, exiting\n") |
|
915 | self.ui.warn("save: no patches applied, exiting\n") | |
918 | return 1 |
|
916 | return 1 | |
919 | if self.issaveline(self.applied[-1]): |
|
917 | if self.issaveline(self.applied[-1]): | |
920 | self.ui.warn("status is already saved\n") |
|
918 | self.ui.warn("status is already saved\n") | |
921 | return 1 |
|
919 | return 1 | |
922 |
|
920 | |||
923 | ar = [ ':' + x for x in self.full_series ] |
|
921 | ar = [ ':' + x for x in self.full_series ] | |
924 | if not msg: |
|
922 | if not msg: | |
925 | msg = "hg patches saved state" |
|
923 | msg = "hg patches saved state" | |
926 | else: |
|
924 | else: | |
927 | msg = "hg patches: " + msg.rstrip('\r\n') |
|
925 | msg = "hg patches: " + msg.rstrip('\r\n') | |
928 | r = self.qrepo() |
|
926 | r = self.qrepo() | |
929 | if r: |
|
927 | if r: | |
930 | pp = r.dirstate.parents() |
|
928 | pp = r.dirstate.parents() | |
931 | msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1])) |
|
929 | msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1])) | |
932 | msg += "\n\nPatch Data:\n" |
|
930 | msg += "\n\nPatch Data:\n" | |
933 |
text = msg + "\n".join(self.applied) + '\n' + (ar and "\n".join(ar) |
|
931 | text = msg + "\n".join(self.applied) + '\n' + (ar and "\n".join(ar) | |
934 | + '\n' or "") |
|
932 | + '\n' or "") | |
935 | n = repo.commit(None, text, user=None, force=1) |
|
933 | n = repo.commit(None, text, user=None, force=1) | |
936 | if not n: |
|
934 | if not n: | |
937 | self.ui.warn("repo commit failed\n") |
|
935 | self.ui.warn("repo commit failed\n") | |
938 | return 1 |
|
936 | return 1 | |
939 | self.applied.append(revlog.hex(n) + ":" + '.hg.patches.save.line') |
|
937 | self.applied.append(revlog.hex(n) + ":" + '.hg.patches.save.line') | |
940 | self.applied_dirty = 1 |
|
938 | self.applied_dirty = 1 | |
941 |
|
939 | |||
942 | def series_end(self): |
|
940 | def series_end(self): | |
943 | end = 0 |
|
941 | end = 0 | |
944 | if len(self.applied) > 0: |
|
942 | if len(self.applied) > 0: | |
945 | (top, p) = self.applied[-1].split(':') |
|
943 | (top, p) = self.applied[-1].split(':') | |
946 | try: |
|
944 | try: | |
947 | end = self.series.index(p) |
|
945 | end = self.series.index(p) | |
948 | except ValueError: |
|
946 | except ValueError: | |
949 | return 0 |
|
947 | return 0 | |
950 | return end + 1 |
|
948 | return end + 1 | |
951 | return end |
|
949 | return end | |
952 |
|
950 | |||
953 | def qapplied(self, repo, patch=None): |
|
951 | def qapplied(self, repo, patch=None): | |
954 | if patch and patch not in self.series: |
|
952 | if patch and patch not in self.series: | |
955 | self.ui.warn("%s not in the series file\n" % patch) |
|
953 | self.ui.warn("%s not in the series file\n" % patch) | |
956 | sys.exit(1) |
|
954 | sys.exit(1) | |
957 | if not patch: |
|
955 | if not patch: | |
958 | end = len(self.applied) |
|
956 | end = len(self.applied) | |
959 | else: |
|
957 | else: | |
960 | end = self.series.index(patch) + 1 |
|
958 | end = self.series.index(patch) + 1 | |
961 | for x in xrange(end): |
|
959 | for x in xrange(end): | |
962 | p = self.appliedname(x) |
|
960 | p = self.appliedname(x) | |
963 | self.ui.write("%s\n" % p) |
|
961 | self.ui.write("%s\n" % p) | |
964 |
|
962 | |||
965 | def appliedname(self, index): |
|
963 | def appliedname(self, index): | |
966 | p = self.applied[index] |
|
964 | p = self.applied[index] | |
967 | if not self.ui.verbose: |
|
965 | if not self.ui.verbose: | |
968 | p = p.split(':')[1] |
|
966 | p = p.split(':')[1] | |
969 | return p |
|
967 | return p | |
970 |
|
968 | |||
971 | def top(self, repo): |
|
969 | def top(self, repo): | |
972 | if len(self.applied): |
|
970 | if len(self.applied): | |
973 | p = self.appliedname(-1) |
|
971 | p = self.appliedname(-1) | |
974 | self.ui.write(p + '\n') |
|
972 | self.ui.write(p + '\n') | |
975 | else: |
|
973 | else: | |
976 | self.ui.write("No patches applied\n") |
|
974 | self.ui.write("No patches applied\n") | |
977 |
|
975 | |||
978 | def next(self, repo): |
|
976 | def next(self, repo): | |
979 | end = self.series_end() |
|
977 | end = self.series_end() | |
980 | if end == len(self.series): |
|
978 | if end == len(self.series): | |
981 | self.ui.write("All patches applied\n") |
|
979 | self.ui.write("All patches applied\n") | |
982 | else: |
|
980 | else: | |
983 | self.ui.write(self.series[end] + '\n') |
|
981 | self.ui.write(self.series[end] + '\n') | |
984 |
|
982 | |||
985 | def prev(self, repo): |
|
983 | def prev(self, repo): | |
986 | if len(self.applied) > 1: |
|
984 | if len(self.applied) > 1: | |
987 | p = self.appliedname(-2) |
|
985 | p = self.appliedname(-2) | |
988 | self.ui.write(p + '\n') |
|
986 | self.ui.write(p + '\n') | |
989 | elif len(self.applied) == 1: |
|
987 | elif len(self.applied) == 1: | |
990 | self.ui.write("Only one patch applied\n") |
|
988 | self.ui.write("Only one patch applied\n") | |
991 | else: |
|
989 | else: | |
992 | self.ui.write("No patches applied\n") |
|
990 | self.ui.write("No patches applied\n") | |
993 |
|
991 | |||
994 | def qimport(self, repo, files, patch=None, existing=None, force=None): |
|
992 | def qimport(self, repo, files, patch=None, existing=None, force=None): | |
995 | if len(files) > 1 and patch: |
|
993 | if len(files) > 1 and patch: | |
996 | self.ui.warn("-n option not valid when importing multiple files\n") |
|
994 | self.ui.warn("-n option not valid when importing multiple files\n") | |
997 | sys.exit(1) |
|
995 | sys.exit(1) | |
998 | i = 0 |
|
996 | i = 0 | |
999 | for filename in files: |
|
997 | for filename in files: | |
1000 | if existing: |
|
998 | if existing: | |
1001 |
if not patch: |
|
999 | if not patch: | |
1002 | patch = filename |
|
1000 | patch = filename | |
1003 | if not os.path.isfile(os.path.join(self.path, patch)): |
|
1001 | if not os.path.isfile(os.path.join(self.path, patch)): | |
1004 | self.ui.warn("patch %s does not exist\n" % patch) |
|
1002 | self.ui.warn("patch %s does not exist\n" % patch) | |
1005 | sys.exit(1) |
|
1003 | sys.exit(1) | |
1006 | else: |
|
1004 | else: | |
1007 | try: |
|
1005 | try: | |
1008 | text = file(filename).read() |
|
1006 | text = file(filename).read() | |
1009 | except IOError: |
|
1007 | except IOError: | |
1010 | self.ui.warn("Unable to read %s\n" % patch) |
|
1008 | self.ui.warn("Unable to read %s\n" % patch) | |
1011 | sys.exit(1) |
|
1009 | sys.exit(1) | |
1012 | if not patch: |
|
1010 | if not patch: | |
1013 | patch = os.path.split(filename)[1] |
|
1011 | patch = os.path.split(filename)[1] | |
1014 | if not force and os.path.isfile(os.path.join(self.path, patch)): |
|
1012 | if not force and os.path.isfile(os.path.join(self.path, patch)): | |
1015 | self.ui.warn("patch %s already exists\n" % patch) |
|
1013 | self.ui.warn("patch %s already exists\n" % patch) | |
1016 | sys.exit(1) |
|
1014 | sys.exit(1) | |
1017 | patchf = self.opener(os.path.join(self.path, patch), "w") |
|
1015 | patchf = self.opener(os.path.join(self.path, patch), "w") | |
1018 | patchf.write(text) |
|
1016 | patchf.write(text) | |
1019 | if patch in self.series: |
|
1017 | if patch in self.series: | |
1020 | self.ui.warn("patch %s is already in the series file\n" % patch) |
|
1018 | self.ui.warn("patch %s is already in the series file\n" % patch) | |
1021 | sys.exit(1) |
|
1019 | sys.exit(1) | |
1022 | index = self.series_end() + i |
|
1020 | index = self.series_end() + i | |
1023 | self.full_series[index:index] = [patch] |
|
1021 | self.full_series[index:index] = [patch] | |
1024 | self.read_series(self.full_series) |
|
1022 | self.read_series(self.full_series) | |
1025 | self.ui.warn("adding %s to series file\n" % patch) |
|
1023 | self.ui.warn("adding %s to series file\n" % patch) | |
1026 | i += 1 |
|
1024 | i += 1 | |
1027 | patch = None |
|
1025 | patch = None | |
1028 | self.series_dirty = 1 |
|
1026 | self.series_dirty = 1 | |
1029 |
|
1027 | |||
1030 | def delete(ui, repo, patch, **opts): |
|
1028 | def delete(ui, repo, patch, **opts): | |
1031 | """remove a patch from the series file""" |
|
1029 | """remove a patch from the series file""" | |
1032 | q = repomap[repo] |
|
1030 | q = repomap[repo] | |
1033 | q.delete(repo, patch) |
|
1031 | q.delete(repo, patch) | |
1034 | q.save_dirty() |
|
1032 | q.save_dirty() | |
1035 | return 0 |
|
1033 | return 0 | |
1036 |
|
1034 | |||
1037 | def applied(ui, repo, patch=None, **opts): |
|
1035 | def applied(ui, repo, patch=None, **opts): | |
1038 | """print the patches already applied""" |
|
1036 | """print the patches already applied""" | |
1039 | repomap[repo].qapplied(repo, patch) |
|
1037 | repomap[repo].qapplied(repo, patch) | |
1040 | return 0 |
|
1038 | return 0 | |
1041 |
|
1039 | |||
1042 | def unapplied(ui, repo, patch=None, **opts): |
|
1040 | def unapplied(ui, repo, patch=None, **opts): | |
1043 | """print the patches not yet applied""" |
|
1041 | """print the patches not yet applied""" | |
1044 | repomap[repo].unapplied(repo, patch) |
|
1042 | repomap[repo].unapplied(repo, patch) | |
1045 | return 0 |
|
1043 | return 0 | |
1046 |
|
1044 | |||
1047 | def qimport(ui, repo, *filename, **opts): |
|
1045 | def qimport(ui, repo, *filename, **opts): | |
1048 | """import a patch""" |
|
1046 | """import a patch""" | |
1049 | q = repomap[repo] |
|
1047 | q = repomap[repo] | |
1050 |
q.qimport(repo, filename, patch=opts['name'], |
|
1048 | q.qimport(repo, filename, patch=opts['name'], | |
1051 |
|
|
1049 | existing=opts['existing'], force=opts['force']) | |
1052 | q.save_dirty() |
|
1050 | q.save_dirty() | |
1053 | return 0 |
|
1051 | return 0 | |
1054 |
|
1052 | |||
1055 | def init(ui, repo, **opts): |
|
1053 | def init(ui, repo, **opts): | |
1056 | """init a new queue repository""" |
|
1054 | """init a new queue repository""" | |
1057 | q = repomap[repo] |
|
1055 | q = repomap[repo] | |
1058 | r = q.init(repo, create=opts['create_repo']) |
|
1056 | r = q.init(repo, create=opts['create_repo']) | |
1059 | q.save_dirty() |
|
1057 | q.save_dirty() | |
1060 | if r: |
|
1058 | if r: | |
1061 | fp = r.wopener('.hgignore', 'w') |
|
1059 | fp = r.wopener('.hgignore', 'w') | |
1062 | print >> fp, 'syntax: glob' |
|
1060 | print >> fp, 'syntax: glob' | |
1063 | print >> fp, 'status' |
|
1061 | print >> fp, 'status' | |
1064 | fp.close() |
|
1062 | fp.close() | |
1065 | r.wopener('series', 'w').close() |
|
1063 | r.wopener('series', 'w').close() | |
1066 | r.add(['.hgignore', 'series']) |
|
1064 | r.add(['.hgignore', 'series']) | |
1067 | return 0 |
|
1065 | return 0 | |
1068 |
|
1066 | |||
1069 | def commit(ui, repo, *pats, **opts): |
|
1067 | def commit(ui, repo, *pats, **opts): | |
1070 | q = repomap[repo] |
|
1068 | q = repomap[repo] | |
1071 | r = q.qrepo() |
|
1069 | r = q.qrepo() | |
1072 | if not r: raise util.Abort('no queue repository') |
|
1070 | if not r: raise util.Abort('no queue repository') | |
1073 | commands.commit(r.ui, r, *pats, **opts) |
|
1071 | commands.commit(r.ui, r, *pats, **opts) | |
1074 |
|
1072 | |||
1075 | def series(ui, repo, **opts): |
|
1073 | def series(ui, repo, **opts): | |
1076 | """print the entire series file""" |
|
1074 | """print the entire series file""" | |
1077 | repomap[repo].qseries(repo, missing=opts['missing']) |
|
1075 | repomap[repo].qseries(repo, missing=opts['missing']) | |
1078 | return 0 |
|
1076 | return 0 | |
1079 |
|
1077 | |||
1080 | def top(ui, repo, **opts): |
|
1078 | def top(ui, repo, **opts): | |
1081 | """print the name of the current patch""" |
|
1079 | """print the name of the current patch""" | |
1082 | repomap[repo].top(repo) |
|
1080 | repomap[repo].top(repo) | |
1083 | return 0 |
|
1081 | return 0 | |
1084 |
|
1082 | |||
1085 | def next(ui, repo, **opts): |
|
1083 | def next(ui, repo, **opts): | |
1086 | """print the name of the next patch""" |
|
1084 | """print the name of the next patch""" | |
1087 | repomap[repo].next(repo) |
|
1085 | repomap[repo].next(repo) | |
1088 | return 0 |
|
1086 | return 0 | |
1089 |
|
1087 | |||
1090 | def prev(ui, repo, **opts): |
|
1088 | def prev(ui, repo, **opts): | |
1091 | """print the name of the previous patch""" |
|
1089 | """print the name of the previous patch""" | |
1092 | repomap[repo].prev(repo) |
|
1090 | repomap[repo].prev(repo) | |
1093 | return 0 |
|
1091 | return 0 | |
1094 |
|
1092 | |||
1095 | def new(ui, repo, patch, **opts): |
|
1093 | def new(ui, repo, patch, **opts): | |
1096 | """create a new patch""" |
|
1094 | """create a new patch""" | |
1097 | q = repomap[repo] |
|
1095 | q = repomap[repo] | |
1098 | q.new(repo, patch, msg=opts['message'], force=opts['force']) |
|
1096 | q.new(repo, patch, msg=opts['message'], force=opts['force']) | |
1099 | q.save_dirty() |
|
1097 | q.save_dirty() | |
1100 | return 0 |
|
1098 | return 0 | |
1101 |
|
1099 | |||
1102 | def refresh(ui, repo, **opts): |
|
1100 | def refresh(ui, repo, **opts): | |
1103 | """update the current patch""" |
|
1101 | """update the current patch""" | |
1104 | q = repomap[repo] |
|
1102 | q = repomap[repo] | |
1105 | q.refresh(repo, short=opts['short']) |
|
1103 | q.refresh(repo, short=opts['short']) | |
1106 | q.save_dirty() |
|
1104 | q.save_dirty() | |
1107 | return 0 |
|
1105 | return 0 | |
1108 |
|
1106 | |||
1109 | def diff(ui, repo, *files, **opts): |
|
1107 | def diff(ui, repo, *files, **opts): | |
1110 | """diff of the current patch""" |
|
1108 | """diff of the current patch""" | |
1111 | repomap[repo].diff(repo, files) |
|
1109 | repomap[repo].diff(repo, files) | |
1112 | return 0 |
|
1110 | return 0 | |
1113 |
|
1111 | |||
1114 | def lastsavename(path): |
|
1112 | def lastsavename(path): | |
1115 | (dir, base) = os.path.split(path) |
|
1113 | (dir, base) = os.path.split(path) | |
1116 | names = os.listdir(dir) |
|
1114 | names = os.listdir(dir) | |
1117 | namere = re.compile("%s.([0-9]+)" % base) |
|
1115 | namere = re.compile("%s.([0-9]+)" % base) | |
1118 | max = None |
|
1116 | max = None | |
1119 | maxname = None |
|
1117 | maxname = None | |
1120 | for f in names: |
|
1118 | for f in names: | |
1121 | m = namere.match(f) |
|
1119 | m = namere.match(f) | |
1122 | if m: |
|
1120 | if m: | |
1123 | index = int(m.group(1)) |
|
1121 | index = int(m.group(1)) | |
1124 | if max == None or index > max: |
|
1122 | if max == None or index > max: | |
1125 | max = index |
|
1123 | max = index | |
1126 | maxname = f |
|
1124 | maxname = f | |
1127 | if maxname: |
|
1125 | if maxname: | |
1128 | return (os.path.join(dir, maxname), max) |
|
1126 | return (os.path.join(dir, maxname), max) | |
1129 | return (None, None) |
|
1127 | return (None, None) | |
1130 |
|
1128 | |||
1131 | def savename(path): |
|
1129 | def savename(path): | |
1132 | (last, index) = lastsavename(path) |
|
1130 | (last, index) = lastsavename(path) | |
1133 | if last is None: |
|
1131 | if last is None: | |
1134 | index = 0 |
|
1132 | index = 0 | |
1135 | newpath = path + ".%d" % (index + 1) |
|
1133 | newpath = path + ".%d" % (index + 1) | |
1136 | return newpath |
|
1134 | return newpath | |
1137 |
|
1135 | |||
1138 | def push(ui, repo, patch=None, **opts): |
|
1136 | def push(ui, repo, patch=None, **opts): | |
1139 | """push the next patch onto the stack""" |
|
1137 | """push the next patch onto the stack""" | |
1140 | q = repomap[repo] |
|
1138 | q = repomap[repo] | |
1141 | mergeq = None |
|
1139 | mergeq = None | |
1142 |
|
1140 | |||
1143 | if opts['all']: |
|
1141 | if opts['all']: | |
1144 | patch = q.series[-1] |
|
1142 | patch = q.series[-1] | |
1145 | if opts['merge']: |
|
1143 | if opts['merge']: | |
1146 | if opts['name']: |
|
1144 | if opts['name']: | |
1147 | newpath = opts['name'] |
|
1145 | newpath = opts['name'] | |
1148 | else: |
|
1146 | else: | |
1149 | newpath,i = lastsavename(q.path) |
|
1147 | newpath, i = lastsavename(q.path) | |
1150 | if not newpath: |
|
1148 | if not newpath: | |
1151 | ui.warn("no saved queues found, please use -n\n") |
|
1149 | ui.warn("no saved queues found, please use -n\n") | |
1152 | return 1 |
|
1150 | return 1 | |
1153 | mergeq = queue(ui, repo.join(""), newpath) |
|
1151 | mergeq = queue(ui, repo.join(""), newpath) | |
1154 | ui.warn("merging with queue at: %s\n" % mergeq.path) |
|
1152 | ui.warn("merging with queue at: %s\n" % mergeq.path) | |
1155 |
ret = q.push(repo, patch, force=opts['force'], list=opts['list'], |
|
1153 | ret = q.push(repo, patch, force=opts['force'], list=opts['list'], | |
1156 | mergeq=mergeq) |
|
1154 | mergeq=mergeq) | |
1157 | q.save_dirty() |
|
1155 | q.save_dirty() | |
1158 | return ret |
|
1156 | return ret | |
1159 |
|
1157 | |||
1160 | def pop(ui, repo, patch=None, **opts): |
|
1158 | def pop(ui, repo, patch=None, **opts): | |
1161 | """pop the current patch off the stack""" |
|
1159 | """pop the current patch off the stack""" | |
1162 | localupdate = True |
|
1160 | localupdate = True | |
1163 | if opts['name']: |
|
1161 | if opts['name']: | |
1164 | q = queue(ui, repo.join(""), repo.join(opts['name'])) |
|
1162 | q = queue(ui, repo.join(""), repo.join(opts['name'])) | |
1165 | ui.warn('using patch queue: %s\n' % q.path) |
|
1163 | ui.warn('using patch queue: %s\n' % q.path) | |
1166 | localupdate = False |
|
1164 | localupdate = False | |
1167 | else: |
|
1165 | else: | |
1168 | q = repomap[repo] |
|
1166 | q = repomap[repo] | |
1169 | if opts['all'] and len(q.applied) > 0: |
|
1167 | if opts['all'] and len(q.applied) > 0: | |
1170 | patch = q.applied[0].split(':')[1] |
|
1168 | patch = q.applied[0].split(':')[1] | |
1171 | q.pop(repo, patch, force=opts['force'], update=localupdate) |
|
1169 | q.pop(repo, patch, force=opts['force'], update=localupdate) | |
1172 | q.save_dirty() |
|
1170 | q.save_dirty() | |
1173 | return 0 |
|
1171 | return 0 | |
1174 |
|
1172 | |||
1175 | def restore(ui, repo, rev, **opts): |
|
1173 | def restore(ui, repo, rev, **opts): | |
1176 | """restore the queue state saved by a rev""" |
|
1174 | """restore the queue state saved by a rev""" | |
1177 | rev = repo.lookup(rev) |
|
1175 | rev = repo.lookup(rev) | |
1178 | q = repomap[repo] |
|
1176 | q = repomap[repo] | |
1179 | q.restore(repo, rev, delete=opts['delete'], |
|
1177 | q.restore(repo, rev, delete=opts['delete'], | |
1180 |
|
|
1178 | qupdate=opts['update']) | |
1181 | q.save_dirty() |
|
1179 | q.save_dirty() | |
1182 | return 0 |
|
1180 | return 0 | |
1183 |
|
1181 | |||
1184 | def save(ui, repo, **opts): |
|
1182 | def save(ui, repo, **opts): | |
1185 | """save current queue state""" |
|
1183 | """save current queue state""" | |
1186 | q = repomap[repo] |
|
1184 | q = repomap[repo] | |
1187 | ret = q.save(repo, msg=opts['message']) |
|
1185 | ret = q.save(repo, msg=opts['message']) | |
1188 | if ret: |
|
1186 | if ret: | |
1189 | return ret |
|
1187 | return ret | |
1190 | q.save_dirty() |
|
1188 | q.save_dirty() | |
1191 | if opts['copy']: |
|
1189 | if opts['copy']: | |
1192 | path = q.path |
|
1190 | path = q.path | |
1193 | if opts['name']: |
|
1191 | if opts['name']: | |
1194 | newpath = os.path.join(q.basepath, opts['name']) |
|
1192 | newpath = os.path.join(q.basepath, opts['name']) | |
1195 | if os.path.exists(newpath): |
|
1193 | if os.path.exists(newpath): | |
1196 | if not os.path.isdir(newpath): |
|
1194 | if not os.path.isdir(newpath): | |
1197 | ui.warn("destination %s exists and is not a directory\n" % |
|
1195 | ui.warn("destination %s exists and is not a directory\n" % | |
1198 |
|
|
1196 | newpath) | |
1199 | sys.exit(1) |
|
1197 | sys.exit(1) | |
1200 | if not opts['force']: |
|
1198 | if not opts['force']: | |
1201 | ui.warn("destination %s exists, use -f to force\n" % |
|
1199 | ui.warn("destination %s exists, use -f to force\n" % | |
1202 |
|
|
1200 | newpath) | |
1203 | sys.exit(1) |
|
1201 | sys.exit(1) | |
1204 | else: |
|
1202 | else: | |
1205 | newpath = savename(path) |
|
1203 | newpath = savename(path) | |
1206 | ui.warn("copy %s to %s\n" % (path, newpath)) |
|
1204 | ui.warn("copy %s to %s\n" % (path, newpath)) | |
1207 | util.copyfiles(path, newpath) |
|
1205 | util.copyfiles(path, newpath) | |
1208 | if opts['empty']: |
|
1206 | if opts['empty']: | |
1209 | try: |
|
1207 | try: | |
1210 | os.unlink(q.status_path) |
|
1208 | os.unlink(q.status_path) | |
1211 | except: |
|
1209 | except: | |
1212 | pass |
|
1210 | pass | |
1213 | return 0 |
|
1211 | return 0 | |
1214 |
|
1212 | |||
1215 | def strip(ui, repo, rev, **opts): |
|
1213 | def strip(ui, repo, rev, **opts): | |
1216 | """strip a revision and all later revs on the same branch""" |
|
1214 | """strip a revision and all later revs on the same branch""" | |
1217 | rev = repo.lookup(rev) |
|
1215 | rev = repo.lookup(rev) | |
1218 | backup = 'all' |
|
1216 | backup = 'all' | |
1219 | if opts['backup']: |
|
1217 | if opts['backup']: | |
1220 | backup = 'strip' |
|
1218 | backup = 'strip' | |
1221 | elif opts['nobackup']: |
|
1219 | elif opts['nobackup']: | |
1222 | backup = 'none' |
|
1220 | backup = 'none' | |
1223 | repomap[repo].strip(repo, rev, backup=backup) |
|
1221 | repomap[repo].strip(repo, rev, backup=backup) | |
1224 | return 0 |
|
1222 | return 0 | |
1225 |
|
1223 | |||
1226 | def version(ui, q=None): |
|
1224 | def version(ui, q=None): | |
1227 | """print the version number""" |
|
1225 | """print the version number""" | |
1228 | ui.write("mq version %s\n" % versionstr) |
|
1226 | ui.write("mq version %s\n" % versionstr) | |
1229 | return 0 |
|
1227 | return 0 | |
1230 |
|
1228 | |||
1231 | def reposetup(ui, repo): |
|
1229 | def reposetup(ui, repo): | |
1232 | repomap[repo] = queue(ui, repo.join("")) |
|
1230 | repomap[repo] = queue(ui, repo.join("")) | |
1233 |
|
1231 | |||
1234 | cmdtable = { |
|
1232 | cmdtable = { | |
1235 |
"qapplied": (applied, [], |
|
1233 | "qapplied": (applied, [], 'hg qapplied [patch]'), | |
1236 |
"qcommit|qci": |
|
1234 | "qcommit|qci": | |
1237 | [('A', 'addremove', None, _('run addremove during commit')), |
|
1235 | (commit, | |
1238 | ('I', 'include', [], _('include names matching the given patterns')), |
|
1236 | [('A', 'addremove', None, _('run addremove during commit')), | |
1239 |
|
|
1237 | ('I', 'include', [], _('include names matching the given patterns')), | |
1240 | ('m', 'message', "", _('use <text> as commit message')), |
|
1238 | ('X', 'exclude', [], _('exclude names matching the given patterns')), | |
1241 |
|
|
1239 | ('m', 'message', '', _('use <text> as commit message')), | |
1242 |
|
|
1240 | ('l', 'logfile', '', _('read the commit message from <file>')), | |
1243 |
|
|
1241 | ('d', 'date', '', _('record datecode as commit date')), | |
1244 | "hg qcommit [options] [files]"), |
|
1242 | ('u', 'user', '', _('record user as commiter'))], | |
1245 | "^qdiff": (diff, [], "hg qdiff [files]"), |
|
1243 | 'hg qcommit [options] [files]'), | |
1246 | "qdelete": (delete, [], "hg qdelete [patch]"), |
|
1244 | "^qdiff": (diff, [], 'hg qdiff [files]'), | |
1247 | "^qimport": (qimport, [('e', 'existing', None, 'import file in patch dir'), |
|
1245 | "qdelete": (delete, [], 'hg qdelete [patch]'), | |
1248 | ('n', 'name', "", 'patch file name'), |
|
1246 | "^qimport": | |
1249 | ('f', 'force', None, 'overwrite existing files')], |
|
1247 | (qimport, | |
1250 | "hg qimport"), |
|
1248 | [('e', 'existing', None, 'import file in patch dir'), | |
1251 | "^qinit": (init, [('c', 'create-repo', None, 'create patch repository')], |
|
1249 | ('n', 'name', '', 'patch file name'), | |
1252 | "hg [-c] qinit"), |
|
1250 | ('f', 'force', None, 'overwrite existing files')], | |
1253 | "qnew": (new, [('m', 'message', "", 'commit message'), |
|
1251 | 'hg qimport'), | |
1254 | ('f', 'force', None, 'force')], |
|
1252 | "^qinit": | |
1255 | "hg qnew [-m message ] patch"), |
|
1253 | (init, | |
1256 | "qnext": (next, [], "hg qnext"), |
|
1254 | [('c', 'create-repo', None, 'create patch repository')], | |
1257 | "qprev": (prev, [], "hg qprev"), |
|
1255 | 'hg [-c] qinit'), | |
1258 | "^qpop": (pop, [('a', 'all', None, 'pop all patches'), |
|
1256 | "qnew": | |
1259 | ('n', 'name', "", 'queue name to pop'), |
|
1257 | (new, | |
1260 | ('f', 'force', None, 'forget any local changes')], |
|
1258 | [('m', 'message', '', 'commit message'), | |
1261 | 'hg qpop [options] [patch/index]'), |
|
1259 | ('f', 'force', None, 'force')], | |
1262 | "^qpush": (push, [('f', 'force', None, 'apply if the patch has rejects'), |
|
1260 | 'hg qnew [-m message ] patch'), | |
1263 | ('l', 'list', None, 'list patch name in commit text'), |
|
1261 | "qnext": (next, [], 'hg qnext'), | |
1264 | ('a', 'all', None, 'apply all patches'), |
|
1262 | "qprev": (prev, [], 'hg qprev'), | |
1265 | ('m', 'merge', None, 'merge from another queue'), |
|
1263 | "^qpop": | |
1266 | ('n', 'name', "", 'merge queue name')], |
|
1264 | (pop, | |
1267 | 'hg qpush [options] [patch/index]'), |
|
1265 | [('a', 'all', None, 'pop all patches'), | |
1268 | "^qrefresh": (refresh, [('s', 'short', None, 'short refresh')],"hg qrefresh"), |
|
1266 | ('n', 'name', '', 'queue name to pop'), | |
1269 | "qrestore": (restore, [('d', 'delete', None, 'delete save entry'), |
|
1267 | ('f', 'force', None, 'forget any local changes')], | |
1270 | ('u', 'update', None, 'update queue working dir')], |
|
1268 | 'hg qpop [options] [patch/index]'), | |
1271 | 'hg qrestore rev'), |
|
1269 | "^qpush": | |
1272 | "qsave": (save, [('m', 'message', "", 'commit message'), |
|
1270 | (push, | |
1273 |
|
|
1271 | [('f', 'force', None, 'apply if the patch has rejects'), | |
1274 | ('n', 'name', "", 'copy directory name'), |
|
1272 | ('l', 'list', None, 'list patch name in commit text'), | |
1275 | ('e', 'empty', None, 'clear queue status file'), |
|
1273 | ('a', 'all', None, 'apply all patches'), | |
1276 |
|
|
1274 | ('m', 'merge', None, 'merge from another queue'), | |
1277 | "qseries": (series, [('m', 'missing', None, 'print patches not in series')], |
|
1275 | ('n', 'name', '', 'merge queue name')], | |
1278 | "hg qseries"), |
|
1276 | 'hg qpush [options] [patch/index]'), | |
1279 | "^strip": (strip, [('f', 'force', None, 'force multi-head removal'), |
|
1277 | "^qrefresh": | |
1280 | ('b', 'backup', None, 'bundle unrelated changesets'), |
|
1278 | (refresh, | |
1281 | ('n', 'nobackup', None, 'no backups')], "hg strip rev"), |
|
1279 | [('s', 'short', None, 'short refresh')], | |
1282 | "qtop": (top, [], "hg qtop"), |
|
1280 | 'hg qrefresh'), | |
1283 | "qunapplied": (unapplied, [], "hg qunapplied [patch]"), |
|
1281 | "qrestore": | |
1284 | "qversion": (version, [], "hg qversion") |
|
1282 | (restore, | |
|
1283 | [('d', 'delete', None, 'delete save entry'), | |||
|
1284 | ('u', 'update', None, 'update queue working dir')], | |||
|
1285 | 'hg qrestore rev'), | |||
|
1286 | "qsave": | |||
|
1287 | (save, | |||
|
1288 | [('m', 'message', '', 'commit message'), | |||
|
1289 | ('c', 'copy', None, 'copy patch directory'), | |||
|
1290 | ('n', 'name', '', 'copy directory name'), | |||
|
1291 | ('e', 'empty', None, 'clear queue status file'), | |||
|
1292 | ('f', 'force', None, 'force copy')], | |||
|
1293 | 'hg qsave'), | |||
|
1294 | "qseries": | |||
|
1295 | (series, | |||
|
1296 | [('m', 'missing', None, 'print patches not in series')], | |||
|
1297 | 'hg qseries'), | |||
|
1298 | "^strip": | |||
|
1299 | (strip, | |||
|
1300 | [('f', 'force', None, 'force multi-head removal'), | |||
|
1301 | ('b', 'backup', None, 'bundle unrelated changesets'), | |||
|
1302 | ('n', 'nobackup', None, 'no backups')], | |||
|
1303 | 'hg strip rev'), | |||
|
1304 | "qtop": (top, [], 'hg qtop'), | |||
|
1305 | "qunapplied": (unapplied, [], 'hg qunapplied [patch]'), | |||
|
1306 | "qversion": (version, [], 'hg qversion') | |||
1285 | } |
|
1307 | } | |
1286 |
|
1308 |
@@ -1,692 +1,692 b'' | |||||
1 | """ |
|
1 | """ | |
2 | util.py - Mercurial utility functions and platform specfic implementations |
|
2 | util.py - Mercurial utility functions and platform specfic implementations | |
3 |
|
3 | |||
4 | Copyright 2005 K. Thananchayan <thananck@yahoo.com> |
|
4 | Copyright 2005 K. Thananchayan <thananck@yahoo.com> | |
5 |
|
5 | |||
6 | This software may be used and distributed according to the terms |
|
6 | This software may be used and distributed according to the terms | |
7 | of the GNU General Public License, incorporated herein by reference. |
|
7 | of the GNU General Public License, incorporated herein by reference. | |
8 |
|
8 | |||
9 | This contains helper routines that are independent of the SCM core and hide |
|
9 | This contains helper routines that are independent of the SCM core and hide | |
10 | platform-specific details from the core. |
|
10 | platform-specific details from the core. | |
11 | """ |
|
11 | """ | |
12 |
|
12 | |||
13 | import os, errno |
|
13 | import os, errno | |
14 | from i18n import gettext as _ |
|
14 | from i18n import gettext as _ | |
15 | from demandload import * |
|
15 | from demandload import * | |
16 | demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile") |
|
16 | demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile") | |
17 | demandload(globals(), "threading time") |
|
17 | demandload(globals(), "threading time") | |
18 |
|
18 | |||
19 | def pipefilter(s, cmd): |
|
19 | def pipefilter(s, cmd): | |
20 | '''filter string S through command CMD, returning its output''' |
|
20 | '''filter string S through command CMD, returning its output''' | |
21 | (pout, pin) = popen2.popen2(cmd, -1, 'b') |
|
21 | (pout, pin) = popen2.popen2(cmd, -1, 'b') | |
22 | def writer(): |
|
22 | def writer(): | |
23 | pin.write(s) |
|
23 | pin.write(s) | |
24 | pin.close() |
|
24 | pin.close() | |
25 |
|
25 | |||
26 | # we should use select instead on UNIX, but this will work on most |
|
26 | # we should use select instead on UNIX, but this will work on most | |
27 | # systems, including Windows |
|
27 | # systems, including Windows | |
28 | w = threading.Thread(target=writer) |
|
28 | w = threading.Thread(target=writer) | |
29 | w.start() |
|
29 | w.start() | |
30 | f = pout.read() |
|
30 | f = pout.read() | |
31 | pout.close() |
|
31 | pout.close() | |
32 | w.join() |
|
32 | w.join() | |
33 | return f |
|
33 | return f | |
34 |
|
34 | |||
35 | def tempfilter(s, cmd): |
|
35 | def tempfilter(s, cmd): | |
36 | '''filter string S through a pair of temporary files with CMD. |
|
36 | '''filter string S through a pair of temporary files with CMD. | |
37 | CMD is used as a template to create the real command to be run, |
|
37 | CMD is used as a template to create the real command to be run, | |
38 | with the strings INFILE and OUTFILE replaced by the real names of |
|
38 | with the strings INFILE and OUTFILE replaced by the real names of | |
39 | the temporary files generated.''' |
|
39 | the temporary files generated.''' | |
40 | inname, outname = None, None |
|
40 | inname, outname = None, None | |
41 | try: |
|
41 | try: | |
42 | infd, inname = tempfile.mkstemp(prefix='hgfin') |
|
42 | infd, inname = tempfile.mkstemp(prefix='hgfin') | |
43 | fp = os.fdopen(infd, 'wb') |
|
43 | fp = os.fdopen(infd, 'wb') | |
44 | fp.write(s) |
|
44 | fp.write(s) | |
45 | fp.close() |
|
45 | fp.close() | |
46 | outfd, outname = tempfile.mkstemp(prefix='hgfout') |
|
46 | outfd, outname = tempfile.mkstemp(prefix='hgfout') | |
47 | os.close(outfd) |
|
47 | os.close(outfd) | |
48 | cmd = cmd.replace('INFILE', inname) |
|
48 | cmd = cmd.replace('INFILE', inname) | |
49 | cmd = cmd.replace('OUTFILE', outname) |
|
49 | cmd = cmd.replace('OUTFILE', outname) | |
50 | code = os.system(cmd) |
|
50 | code = os.system(cmd) | |
51 | if code: raise Abort(_("command '%s' failed: %s") % |
|
51 | if code: raise Abort(_("command '%s' failed: %s") % | |
52 | (cmd, explain_exit(code))) |
|
52 | (cmd, explain_exit(code))) | |
53 | return open(outname, 'rb').read() |
|
53 | return open(outname, 'rb').read() | |
54 | finally: |
|
54 | finally: | |
55 | try: |
|
55 | try: | |
56 | if inname: os.unlink(inname) |
|
56 | if inname: os.unlink(inname) | |
57 | except: pass |
|
57 | except: pass | |
58 | try: |
|
58 | try: | |
59 | if outname: os.unlink(outname) |
|
59 | if outname: os.unlink(outname) | |
60 | except: pass |
|
60 | except: pass | |
61 |
|
61 | |||
62 | filtertable = { |
|
62 | filtertable = { | |
63 | 'tempfile:': tempfilter, |
|
63 | 'tempfile:': tempfilter, | |
64 | 'pipe:': pipefilter, |
|
64 | 'pipe:': pipefilter, | |
65 | } |
|
65 | } | |
66 |
|
66 | |||
67 | def filter(s, cmd): |
|
67 | def filter(s, cmd): | |
68 | "filter a string through a command that transforms its input to its output" |
|
68 | "filter a string through a command that transforms its input to its output" | |
69 | for name, fn in filtertable.iteritems(): |
|
69 | for name, fn in filtertable.iteritems(): | |
70 | if cmd.startswith(name): |
|
70 | if cmd.startswith(name): | |
71 | return fn(s, cmd[len(name):].lstrip()) |
|
71 | return fn(s, cmd[len(name):].lstrip()) | |
72 | return pipefilter(s, cmd) |
|
72 | return pipefilter(s, cmd) | |
73 |
|
73 | |||
74 | def patch(strip, patchname, ui): |
|
74 | def patch(strip, patchname, ui): | |
75 | """apply the patch <patchname> to the working directory. |
|
75 | """apply the patch <patchname> to the working directory. | |
76 | a list of patched files is returned""" |
|
76 | a list of patched files is returned""" | |
77 | fp = os.popen('patch -p%d < "%s"' % (strip, patchname)) |
|
77 | fp = os.popen('patch -p%d < "%s"' % (strip, patchname)) | |
78 | files = {} |
|
78 | files = {} | |
79 | for line in fp: |
|
79 | for line in fp: | |
80 | line = line.rstrip() |
|
80 | line = line.rstrip() | |
81 | ui.status("%s\n" % line) |
|
81 | ui.status("%s\n" % line) | |
82 | if line.startswith('patching file '): |
|
82 | if line.startswith('patching file '): | |
83 | pf = parse_patch_output(line) |
|
83 | pf = parse_patch_output(line) | |
84 | files.setdefault(pf, 1) |
|
84 | files.setdefault(pf, 1) | |
85 | code = fp.close() |
|
85 | code = fp.close() | |
86 | if code: |
|
86 | if code: | |
87 | raise Abort(_("patch command failed: %s") % explain_exit(code)[0]) |
|
87 | raise Abort(_("patch command failed: %s") % explain_exit(code)[0]) | |
88 | return files.keys() |
|
88 | return files.keys() | |
89 |
|
89 | |||
90 | def binary(s): |
|
90 | def binary(s): | |
91 | """return true if a string is binary data using diff's heuristic""" |
|
91 | """return true if a string is binary data using diff's heuristic""" | |
92 | if s and '\0' in s[:4096]: |
|
92 | if s and '\0' in s[:4096]: | |
93 | return True |
|
93 | return True | |
94 | return False |
|
94 | return False | |
95 |
|
95 | |||
96 | def unique(g): |
|
96 | def unique(g): | |
97 | """return the uniq elements of iterable g""" |
|
97 | """return the uniq elements of iterable g""" | |
98 | seen = {} |
|
98 | seen = {} | |
99 | for f in g: |
|
99 | for f in g: | |
100 | if f not in seen: |
|
100 | if f not in seen: | |
101 | seen[f] = 1 |
|
101 | seen[f] = 1 | |
102 | yield f |
|
102 | yield f | |
103 |
|
103 | |||
104 | class Abort(Exception): |
|
104 | class Abort(Exception): | |
105 | """Raised if a command needs to print an error and exit.""" |
|
105 | """Raised if a command needs to print an error and exit.""" | |
106 |
|
106 | |||
107 | def always(fn): return True |
|
107 | def always(fn): return True | |
108 | def never(fn): return False |
|
108 | def never(fn): return False | |
109 |
|
109 | |||
110 | def patkind(name, dflt_pat='glob'): |
|
110 | def patkind(name, dflt_pat='glob'): | |
111 | """Split a string into an optional pattern kind prefix and the |
|
111 | """Split a string into an optional pattern kind prefix and the | |
112 | actual pattern.""" |
|
112 | actual pattern.""" | |
113 | for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre': |
|
113 | for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre': | |
114 | if name.startswith(prefix + ':'): return name.split(':', 1) |
|
114 | if name.startswith(prefix + ':'): return name.split(':', 1) | |
115 | return dflt_pat, name |
|
115 | return dflt_pat, name | |
116 |
|
116 | |||
117 | def globre(pat, head='^', tail='$'): |
|
117 | def globre(pat, head='^', tail='$'): | |
118 | "convert a glob pattern into a regexp" |
|
118 | "convert a glob pattern into a regexp" | |
119 | i, n = 0, len(pat) |
|
119 | i, n = 0, len(pat) | |
120 | res = '' |
|
120 | res = '' | |
121 | group = False |
|
121 | group = False | |
122 | def peek(): return i < n and pat[i] |
|
122 | def peek(): return i < n and pat[i] | |
123 | while i < n: |
|
123 | while i < n: | |
124 | c = pat[i] |
|
124 | c = pat[i] | |
125 | i = i+1 |
|
125 | i = i+1 | |
126 | if c == '*': |
|
126 | if c == '*': | |
127 | if peek() == '*': |
|
127 | if peek() == '*': | |
128 | i += 1 |
|
128 | i += 1 | |
129 | res += '.*' |
|
129 | res += '.*' | |
130 | else: |
|
130 | else: | |
131 | res += '[^/]*' |
|
131 | res += '[^/]*' | |
132 | elif c == '?': |
|
132 | elif c == '?': | |
133 | res += '.' |
|
133 | res += '.' | |
134 | elif c == '[': |
|
134 | elif c == '[': | |
135 | j = i |
|
135 | j = i | |
136 | if j < n and pat[j] in '!]': |
|
136 | if j < n and pat[j] in '!]': | |
137 | j += 1 |
|
137 | j += 1 | |
138 | while j < n and pat[j] != ']': |
|
138 | while j < n and pat[j] != ']': | |
139 | j += 1 |
|
139 | j += 1 | |
140 | if j >= n: |
|
140 | if j >= n: | |
141 | res += '\\[' |
|
141 | res += '\\[' | |
142 | else: |
|
142 | else: | |
143 | stuff = pat[i:j].replace('\\','\\\\') |
|
143 | stuff = pat[i:j].replace('\\','\\\\') | |
144 | i = j + 1 |
|
144 | i = j + 1 | |
145 | if stuff[0] == '!': |
|
145 | if stuff[0] == '!': | |
146 | stuff = '^' + stuff[1:] |
|
146 | stuff = '^' + stuff[1:] | |
147 | elif stuff[0] == '^': |
|
147 | elif stuff[0] == '^': | |
148 | stuff = '\\' + stuff |
|
148 | stuff = '\\' + stuff | |
149 | res = '%s[%s]' % (res, stuff) |
|
149 | res = '%s[%s]' % (res, stuff) | |
150 | elif c == '{': |
|
150 | elif c == '{': | |
151 | group = True |
|
151 | group = True | |
152 | res += '(?:' |
|
152 | res += '(?:' | |
153 | elif c == '}' and group: |
|
153 | elif c == '}' and group: | |
154 | res += ')' |
|
154 | res += ')' | |
155 | group = False |
|
155 | group = False | |
156 | elif c == ',' and group: |
|
156 | elif c == ',' and group: | |
157 | res += '|' |
|
157 | res += '|' | |
158 | else: |
|
158 | else: | |
159 | res += re.escape(c) |
|
159 | res += re.escape(c) | |
160 | return head + res + tail |
|
160 | return head + res + tail | |
161 |
|
161 | |||
162 | _globchars = {'[': 1, '{': 1, '*': 1, '?': 1} |
|
162 | _globchars = {'[': 1, '{': 1, '*': 1, '?': 1} | |
163 |
|
163 | |||
164 | def pathto(n1, n2): |
|
164 | def pathto(n1, n2): | |
165 | '''return the relative path from one place to another. |
|
165 | '''return the relative path from one place to another. | |
166 | this returns a path in the form used by the local filesystem, not hg.''' |
|
166 | this returns a path in the form used by the local filesystem, not hg.''' | |
167 | if not n1: return localpath(n2) |
|
167 | if not n1: return localpath(n2) | |
168 | a, b = n1.split('/'), n2.split('/') |
|
168 | a, b = n1.split('/'), n2.split('/') | |
169 | a.reverse() |
|
169 | a.reverse() | |
170 | b.reverse() |
|
170 | b.reverse() | |
171 | while a and b and a[-1] == b[-1]: |
|
171 | while a and b and a[-1] == b[-1]: | |
172 | a.pop() |
|
172 | a.pop() | |
173 | b.pop() |
|
173 | b.pop() | |
174 | b.reverse() |
|
174 | b.reverse() | |
175 | return os.sep.join((['..'] * len(a)) + b) |
|
175 | return os.sep.join((['..'] * len(a)) + b) | |
176 |
|
176 | |||
177 | def canonpath(root, cwd, myname): |
|
177 | def canonpath(root, cwd, myname): | |
178 | """return the canonical path of myname, given cwd and root""" |
|
178 | """return the canonical path of myname, given cwd and root""" | |
179 | if root == os.sep: |
|
179 | if root == os.sep: | |
180 | rootsep = os.sep |
|
180 | rootsep = os.sep | |
181 | else: |
|
181 | else: | |
182 |
|
|
182 | rootsep = root + os.sep | |
183 | name = myname |
|
183 | name = myname | |
184 | if not name.startswith(os.sep): |
|
184 | if not name.startswith(os.sep): | |
185 | name = os.path.join(root, cwd, name) |
|
185 | name = os.path.join(root, cwd, name) | |
186 | name = os.path.normpath(name) |
|
186 | name = os.path.normpath(name) | |
187 | if name.startswith(rootsep): |
|
187 | if name.startswith(rootsep): | |
188 | return pconvert(name[len(rootsep):]) |
|
188 | return pconvert(name[len(rootsep):]) | |
189 | elif name == root: |
|
189 | elif name == root: | |
190 | return '' |
|
190 | return '' | |
191 | else: |
|
191 | else: | |
192 | raise Abort('%s not under root' % myname) |
|
192 | raise Abort('%s not under root' % myname) | |
193 |
|
193 | |||
194 | def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None): |
|
194 | def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None): | |
195 | return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src) |
|
195 | return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src) | |
196 |
|
196 | |||
197 | def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None): |
|
197 | def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None): | |
198 | if os.name == 'nt': |
|
198 | if os.name == 'nt': | |
199 | dflt_pat = 'glob' |
|
199 | dflt_pat = 'glob' | |
200 | else: |
|
200 | else: | |
201 | dflt_pat = 'relpath' |
|
201 | dflt_pat = 'relpath' | |
202 | return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src) |
|
202 | return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src) | |
203 |
|
203 | |||
204 | def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src): |
|
204 | def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src): | |
205 | """build a function to match a set of file patterns |
|
205 | """build a function to match a set of file patterns | |
206 |
|
206 | |||
207 | arguments: |
|
207 | arguments: | |
208 | canonroot - the canonical root of the tree you're matching against |
|
208 | canonroot - the canonical root of the tree you're matching against | |
209 | cwd - the current working directory, if relevant |
|
209 | cwd - the current working directory, if relevant | |
210 | names - patterns to find |
|
210 | names - patterns to find | |
211 | inc - patterns to include |
|
211 | inc - patterns to include | |
212 | exc - patterns to exclude |
|
212 | exc - patterns to exclude | |
213 | head - a regex to prepend to patterns to control whether a match is rooted |
|
213 | head - a regex to prepend to patterns to control whether a match is rooted | |
214 |
|
214 | |||
215 | a pattern is one of: |
|
215 | a pattern is one of: | |
216 | 'glob:<rooted glob>' |
|
216 | 'glob:<rooted glob>' | |
217 | 're:<rooted regexp>' |
|
217 | 're:<rooted regexp>' | |
218 | 'path:<rooted path>' |
|
218 | 'path:<rooted path>' | |
219 | 'relglob:<relative glob>' |
|
219 | 'relglob:<relative glob>' | |
220 | 'relpath:<relative path>' |
|
220 | 'relpath:<relative path>' | |
221 | 'relre:<relative regexp>' |
|
221 | 'relre:<relative regexp>' | |
222 | '<rooted path or regexp>' |
|
222 | '<rooted path or regexp>' | |
223 |
|
223 | |||
224 | returns: |
|
224 | returns: | |
225 | a 3-tuple containing |
|
225 | a 3-tuple containing | |
226 | - list of explicit non-pattern names passed in |
|
226 | - list of explicit non-pattern names passed in | |
227 | - a bool match(filename) function |
|
227 | - a bool match(filename) function | |
228 | - a bool indicating if any patterns were passed in |
|
228 | - a bool indicating if any patterns were passed in | |
229 |
|
229 | |||
230 | todo: |
|
230 | todo: | |
231 | make head regex a rooted bool |
|
231 | make head regex a rooted bool | |
232 | """ |
|
232 | """ | |
233 |
|
233 | |||
234 | def contains_glob(name): |
|
234 | def contains_glob(name): | |
235 | for c in name: |
|
235 | for c in name: | |
236 | if c in _globchars: return True |
|
236 | if c in _globchars: return True | |
237 | return False |
|
237 | return False | |
238 |
|
238 | |||
239 | def regex(kind, name, tail): |
|
239 | def regex(kind, name, tail): | |
240 | '''convert a pattern into a regular expression''' |
|
240 | '''convert a pattern into a regular expression''' | |
241 | if kind == 're': |
|
241 | if kind == 're': | |
242 | return name |
|
242 | return name | |
243 | elif kind == 'path': |
|
243 | elif kind == 'path': | |
244 | return '^' + re.escape(name) + '(?:/|$)' |
|
244 | return '^' + re.escape(name) + '(?:/|$)' | |
245 | elif kind == 'relglob': |
|
245 | elif kind == 'relglob': | |
246 | return head + globre(name, '(?:|.*/)', tail) |
|
246 | return head + globre(name, '(?:|.*/)', tail) | |
247 | elif kind == 'relpath': |
|
247 | elif kind == 'relpath': | |
248 | return head + re.escape(name) + tail |
|
248 | return head + re.escape(name) + tail | |
249 | elif kind == 'relre': |
|
249 | elif kind == 'relre': | |
250 | if name.startswith('^'): |
|
250 | if name.startswith('^'): | |
251 | return name |
|
251 | return name | |
252 | return '.*' + name |
|
252 | return '.*' + name | |
253 | return head + globre(name, '', tail) |
|
253 | return head + globre(name, '', tail) | |
254 |
|
254 | |||
255 | def matchfn(pats, tail): |
|
255 | def matchfn(pats, tail): | |
256 | """build a matching function from a set of patterns""" |
|
256 | """build a matching function from a set of patterns""" | |
257 | if not pats: |
|
257 | if not pats: | |
258 | return |
|
258 | return | |
259 | matches = [] |
|
259 | matches = [] | |
260 | for k, p in pats: |
|
260 | for k, p in pats: | |
261 | try: |
|
261 | try: | |
262 | pat = '(?:%s)' % regex(k, p, tail) |
|
262 | pat = '(?:%s)' % regex(k, p, tail) | |
263 | matches.append(re.compile(pat).match) |
|
263 | matches.append(re.compile(pat).match) | |
264 | except re.error: |
|
264 | except re.error: | |
265 | if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p)) |
|
265 | if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p)) | |
266 | else: raise Abort("invalid pattern (%s): %s" % (k, p)) |
|
266 | else: raise Abort("invalid pattern (%s): %s" % (k, p)) | |
267 |
|
267 | |||
268 | def buildfn(text): |
|
268 | def buildfn(text): | |
269 | for m in matches: |
|
269 | for m in matches: | |
270 | r = m(text) |
|
270 | r = m(text) | |
271 | if r: |
|
271 | if r: | |
272 | return r |
|
272 | return r | |
273 |
|
273 | |||
274 | return buildfn |
|
274 | return buildfn | |
275 |
|
275 | |||
276 | def globprefix(pat): |
|
276 | def globprefix(pat): | |
277 | '''return the non-glob prefix of a path, e.g. foo/* -> foo''' |
|
277 | '''return the non-glob prefix of a path, e.g. foo/* -> foo''' | |
278 | root = [] |
|
278 | root = [] | |
279 | for p in pat.split(os.sep): |
|
279 | for p in pat.split(os.sep): | |
280 | if contains_glob(p): break |
|
280 | if contains_glob(p): break | |
281 | root.append(p) |
|
281 | root.append(p) | |
282 | return '/'.join(root) |
|
282 | return '/'.join(root) | |
283 |
|
283 | |||
284 | pats = [] |
|
284 | pats = [] | |
285 | files = [] |
|
285 | files = [] | |
286 | roots = [] |
|
286 | roots = [] | |
287 | for kind, name in [patkind(p, dflt_pat) for p in names]: |
|
287 | for kind, name in [patkind(p, dflt_pat) for p in names]: | |
288 | if kind in ('glob', 'relpath'): |
|
288 | if kind in ('glob', 'relpath'): | |
289 | name = canonpath(canonroot, cwd, name) |
|
289 | name = canonpath(canonroot, cwd, name) | |
290 | if name == '': |
|
290 | if name == '': | |
291 | kind, name = 'glob', '**' |
|
291 | kind, name = 'glob', '**' | |
292 | if kind in ('glob', 'path', 're'): |
|
292 | if kind in ('glob', 'path', 're'): | |
293 | pats.append((kind, name)) |
|
293 | pats.append((kind, name)) | |
294 | if kind == 'glob': |
|
294 | if kind == 'glob': | |
295 | root = globprefix(name) |
|
295 | root = globprefix(name) | |
296 | if root: roots.append(root) |
|
296 | if root: roots.append(root) | |
297 | elif kind == 'relpath': |
|
297 | elif kind == 'relpath': | |
298 | files.append((kind, name)) |
|
298 | files.append((kind, name)) | |
299 | roots.append(name) |
|
299 | roots.append(name) | |
300 |
|
300 | |||
301 | patmatch = matchfn(pats, '$') or always |
|
301 | patmatch = matchfn(pats, '$') or always | |
302 | filematch = matchfn(files, '(?:/|$)') or always |
|
302 | filematch = matchfn(files, '(?:/|$)') or always | |
303 | incmatch = always |
|
303 | incmatch = always | |
304 | if inc: |
|
304 | if inc: | |
305 | incmatch = matchfn(map(patkind, inc), '(?:/|$)') |
|
305 | incmatch = matchfn(map(patkind, inc), '(?:/|$)') | |
306 | excmatch = lambda fn: False |
|
306 | excmatch = lambda fn: False | |
307 | if exc: |
|
307 | if exc: | |
308 | excmatch = matchfn(map(patkind, exc), '(?:/|$)') |
|
308 | excmatch = matchfn(map(patkind, exc), '(?:/|$)') | |
309 |
|
309 | |||
310 | return (roots, |
|
310 | return (roots, | |
311 | lambda fn: (incmatch(fn) and not excmatch(fn) and |
|
311 | lambda fn: (incmatch(fn) and not excmatch(fn) and | |
312 | (fn.endswith('/') or |
|
312 | (fn.endswith('/') or | |
313 | (not pats and not files) or |
|
313 | (not pats and not files) or | |
314 | (pats and patmatch(fn)) or |
|
314 | (pats and patmatch(fn)) or | |
315 | (files and filematch(fn)))), |
|
315 | (files and filematch(fn)))), | |
316 | (inc or exc or (pats and pats != [('glob', '**')])) and True) |
|
316 | (inc or exc or (pats and pats != [('glob', '**')])) and True) | |
317 |
|
317 | |||
318 | def system(cmd, errprefix=None): |
|
318 | def system(cmd, errprefix=None): | |
319 | """execute a shell command that must succeed""" |
|
319 | """execute a shell command that must succeed""" | |
320 | rc = os.system(cmd) |
|
320 | rc = os.system(cmd) | |
321 | if rc: |
|
321 | if rc: | |
322 | errmsg = "%s %s" % (os.path.basename(cmd.split(None, 1)[0]), |
|
322 | errmsg = "%s %s" % (os.path.basename(cmd.split(None, 1)[0]), | |
323 | explain_exit(rc)[0]) |
|
323 | explain_exit(rc)[0]) | |
324 | if errprefix: |
|
324 | if errprefix: | |
325 | errmsg = "%s: %s" % (errprefix, errmsg) |
|
325 | errmsg = "%s: %s" % (errprefix, errmsg) | |
326 | raise Abort(errmsg) |
|
326 | raise Abort(errmsg) | |
327 |
|
327 | |||
328 | def rename(src, dst): |
|
328 | def rename(src, dst): | |
329 | """forcibly rename a file""" |
|
329 | """forcibly rename a file""" | |
330 | try: |
|
330 | try: | |
331 | os.rename(src, dst) |
|
331 | os.rename(src, dst) | |
332 | except: |
|
332 | except: | |
333 | os.unlink(dst) |
|
333 | os.unlink(dst) | |
334 | os.rename(src, dst) |
|
334 | os.rename(src, dst) | |
335 |
|
335 | |||
336 | def unlink(f): |
|
336 | def unlink(f): | |
337 | """unlink and remove the directory if it is empty""" |
|
337 | """unlink and remove the directory if it is empty""" | |
338 | os.unlink(f) |
|
338 | os.unlink(f) | |
339 | # try removing directories that might now be empty |
|
339 | # try removing directories that might now be empty | |
340 | try: os.removedirs(os.path.dirname(f)) |
|
340 | try: os.removedirs(os.path.dirname(f)) | |
341 | except: pass |
|
341 | except: pass | |
342 |
|
342 | |||
343 | def copyfiles(src, dst, hardlink=None): |
|
343 | def copyfiles(src, dst, hardlink=None): | |
344 | """Copy a directory tree using hardlinks if possible""" |
|
344 | """Copy a directory tree using hardlinks if possible""" | |
345 |
|
345 | |||
346 | if hardlink is None: |
|
346 | if hardlink is None: | |
347 | hardlink = (os.stat(src).st_dev == |
|
347 | hardlink = (os.stat(src).st_dev == | |
348 | os.stat(os.path.dirname(dst)).st_dev) |
|
348 | os.stat(os.path.dirname(dst)).st_dev) | |
349 |
|
349 | |||
350 | if os.path.isdir(src): |
|
350 | if os.path.isdir(src): | |
351 | os.mkdir(dst) |
|
351 | os.mkdir(dst) | |
352 | for name in os.listdir(src): |
|
352 | for name in os.listdir(src): | |
353 | srcname = os.path.join(src, name) |
|
353 | srcname = os.path.join(src, name) | |
354 | dstname = os.path.join(dst, name) |
|
354 | dstname = os.path.join(dst, name) | |
355 | copyfiles(srcname, dstname, hardlink) |
|
355 | copyfiles(srcname, dstname, hardlink) | |
356 | else: |
|
356 | else: | |
357 | if hardlink: |
|
357 | if hardlink: | |
358 | try: |
|
358 | try: | |
359 | os_link(src, dst) |
|
359 | os_link(src, dst) | |
360 | except: |
|
360 | except: | |
361 | hardlink = False |
|
361 | hardlink = False | |
362 | shutil.copy(src, dst) |
|
362 | shutil.copy(src, dst) | |
363 | else: |
|
363 | else: | |
364 | shutil.copy(src, dst) |
|
364 | shutil.copy(src, dst) | |
365 |
|
365 | |||
366 | def opener(base): |
|
366 | def opener(base): | |
367 | """ |
|
367 | """ | |
368 | return a function that opens files relative to base |
|
368 | return a function that opens files relative to base | |
369 |
|
369 | |||
370 | this function is used to hide the details of COW semantics and |
|
370 | this function is used to hide the details of COW semantics and | |
371 | remote file access from higher level code. |
|
371 | remote file access from higher level code. | |
372 | """ |
|
372 | """ | |
373 | p = base |
|
373 | p = base | |
374 |
|
374 | |||
375 | def mktempcopy(name): |
|
375 | def mktempcopy(name): | |
376 | d, fn = os.path.split(name) |
|
376 | d, fn = os.path.split(name) | |
377 | fd, temp = tempfile.mkstemp(prefix=fn, dir=d) |
|
377 | fd, temp = tempfile.mkstemp(prefix=fn, dir=d) | |
378 | fp = os.fdopen(fd, "wb") |
|
378 | fp = os.fdopen(fd, "wb") | |
379 | try: |
|
379 | try: | |
380 | fp.write(file(name, "rb").read()) |
|
380 | fp.write(file(name, "rb").read()) | |
381 | except: |
|
381 | except: | |
382 | try: os.unlink(temp) |
|
382 | try: os.unlink(temp) | |
383 | except: pass |
|
383 | except: pass | |
384 | raise |
|
384 | raise | |
385 | fp.close() |
|
385 | fp.close() | |
386 | st = os.lstat(name) |
|
386 | st = os.lstat(name) | |
387 | os.chmod(temp, st.st_mode) |
|
387 | os.chmod(temp, st.st_mode) | |
388 | return temp |
|
388 | return temp | |
389 |
|
389 | |||
390 | class atomicfile(file): |
|
390 | class atomicfile(file): | |
391 | """the file will only be copied on close""" |
|
391 | """the file will only be copied on close""" | |
392 | def __init__(self, name, mode, atomic=False): |
|
392 | def __init__(self, name, mode, atomic=False): | |
393 | self.__name = name |
|
393 | self.__name = name | |
394 | self.temp = mktempcopy(name) |
|
394 | self.temp = mktempcopy(name) | |
395 | file.__init__(self, self.temp, mode) |
|
395 | file.__init__(self, self.temp, mode) | |
396 | def close(self): |
|
396 | def close(self): | |
397 | if not self.closed: |
|
397 | if not self.closed: | |
398 | file.close(self) |
|
398 | file.close(self) | |
399 | rename(self.temp, self.__name) |
|
399 | rename(self.temp, self.__name) | |
400 | def __del__(self): |
|
400 | def __del__(self): | |
401 | self.close() |
|
401 | self.close() | |
402 |
|
402 | |||
403 | def o(path, mode="r", text=False, atomic=False): |
|
403 | def o(path, mode="r", text=False, atomic=False): | |
404 | f = os.path.join(p, path) |
|
404 | f = os.path.join(p, path) | |
405 |
|
405 | |||
406 | if not text: |
|
406 | if not text: | |
407 | mode += "b" # for that other OS |
|
407 | mode += "b" # for that other OS | |
408 |
|
408 | |||
409 | if mode[0] != "r": |
|
409 | if mode[0] != "r": | |
410 | try: |
|
410 | try: | |
411 | nlink = nlinks(f) |
|
411 | nlink = nlinks(f) | |
412 | except OSError: |
|
412 | except OSError: | |
413 | d = os.path.dirname(f) |
|
413 | d = os.path.dirname(f) | |
414 | if not os.path.isdir(d): |
|
414 | if not os.path.isdir(d): | |
415 | os.makedirs(d) |
|
415 | os.makedirs(d) | |
416 | else: |
|
416 | else: | |
417 | if atomic: |
|
417 | if atomic: | |
418 | return atomicfile(f, mode) |
|
418 | return atomicfile(f, mode) | |
419 | if nlink > 1: |
|
419 | if nlink > 1: | |
420 | rename(mktempcopy(f), f) |
|
420 | rename(mktempcopy(f), f) | |
421 | return file(f, mode) |
|
421 | return file(f, mode) | |
422 |
|
422 | |||
423 | return o |
|
423 | return o | |
424 |
|
424 | |||
425 | def _makelock_file(info, pathname): |
|
425 | def _makelock_file(info, pathname): | |
426 | ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL) |
|
426 | ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL) | |
427 | os.write(ld, info) |
|
427 | os.write(ld, info) | |
428 | os.close(ld) |
|
428 | os.close(ld) | |
429 |
|
429 | |||
430 | def _readlock_file(pathname): |
|
430 | def _readlock_file(pathname): | |
431 | return file(pathname).read() |
|
431 | return file(pathname).read() | |
432 |
|
432 | |||
433 | def nlinks(pathname): |
|
433 | def nlinks(pathname): | |
434 | """Return number of hardlinks for the given file.""" |
|
434 | """Return number of hardlinks for the given file.""" | |
435 | return os.stat(pathname).st_nlink |
|
435 | return os.stat(pathname).st_nlink | |
436 |
|
436 | |||
437 | if hasattr(os, 'link'): |
|
437 | if hasattr(os, 'link'): | |
438 | os_link = os.link |
|
438 | os_link = os.link | |
439 | else: |
|
439 | else: | |
440 | def os_link(src, dst): |
|
440 | def os_link(src, dst): | |
441 | raise OSError(0, _("Hardlinks not supported")) |
|
441 | raise OSError(0, _("Hardlinks not supported")) | |
442 |
|
442 | |||
443 | # Platform specific variants |
|
443 | # Platform specific variants | |
444 | if os.name == 'nt': |
|
444 | if os.name == 'nt': | |
445 | demandload(globals(), "msvcrt") |
|
445 | demandload(globals(), "msvcrt") | |
446 | nulldev = 'NUL:' |
|
446 | nulldev = 'NUL:' | |
447 |
|
447 | |||
448 | class winstdout: |
|
448 | class winstdout: | |
449 | '''stdout on windows misbehaves if sent through a pipe''' |
|
449 | '''stdout on windows misbehaves if sent through a pipe''' | |
450 |
|
450 | |||
451 | def __init__(self, fp): |
|
451 | def __init__(self, fp): | |
452 | self.fp = fp |
|
452 | self.fp = fp | |
453 |
|
453 | |||
454 | def __getattr__(self, key): |
|
454 | def __getattr__(self, key): | |
455 | return getattr(self.fp, key) |
|
455 | return getattr(self.fp, key) | |
456 |
|
456 | |||
457 | def close(self): |
|
457 | def close(self): | |
458 | try: |
|
458 | try: | |
459 | self.fp.close() |
|
459 | self.fp.close() | |
460 | except: pass |
|
460 | except: pass | |
461 |
|
461 | |||
462 | def write(self, s): |
|
462 | def write(self, s): | |
463 | try: |
|
463 | try: | |
464 | return self.fp.write(s) |
|
464 | return self.fp.write(s) | |
465 | except IOError, inst: |
|
465 | except IOError, inst: | |
466 | if inst.errno != 0: raise |
|
466 | if inst.errno != 0: raise | |
467 | self.close() |
|
467 | self.close() | |
468 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
468 | raise IOError(errno.EPIPE, 'Broken pipe') | |
469 |
|
469 | |||
470 | sys.stdout = winstdout(sys.stdout) |
|
470 | sys.stdout = winstdout(sys.stdout) | |
471 |
|
471 | |||
472 | try: |
|
472 | try: | |
473 | import win32api, win32process |
|
473 | import win32api, win32process | |
474 | filename = win32process.GetModuleFileNameEx(win32api.GetCurrentProcess(), 0) |
|
474 | filename = win32process.GetModuleFileNameEx(win32api.GetCurrentProcess(), 0) | |
475 | systemrc = os.path.join(os.path.dirname(filename), 'mercurial.ini') |
|
475 | systemrc = os.path.join(os.path.dirname(filename), 'mercurial.ini') | |
476 |
|
476 | |||
477 | except ImportError: |
|
477 | except ImportError: | |
478 | systemrc = r'c:\mercurial\mercurial.ini' |
|
478 | systemrc = r'c:\mercurial\mercurial.ini' | |
479 | pass |
|
479 | pass | |
480 |
|
480 | |||
481 | rcpath = (systemrc, |
|
481 | rcpath = (systemrc, | |
482 | os.path.join(os.path.expanduser('~'), 'mercurial.ini')) |
|
482 | os.path.join(os.path.expanduser('~'), 'mercurial.ini')) | |
483 |
|
483 | |||
def parse_patch_output(output_line):
    """parses the output produced by patch and returns the file name"""
    # strip the fixed "patching file " prefix (14 characters)
    pf = output_line[14:]
    # patch may backquote the name; startswith() also handles an empty
    # name, where the original pf[0] indexing raised IndexError
    if pf.startswith('`'):
        pf = pf[1:-1] # Remove the quotes
    return pf
490 |
|
490 | |||
try: # ActivePython can create hard links using win32file module
    import win32file

    def os_link(src, dst): # NB will only succeed on NTFS
        win32file.CreateHardLink(dst, src)

    def nlinks(pathname):
        """Return number of hardlinks for the given file."""
        try:
            fh = win32file.CreateFile(pathname,
                win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
                None, win32file.OPEN_EXISTING, 0, None)
            res = win32file.GetFileInformationByHandle(fh)
            fh.Close()
            # index 7 of the file-information tuple is the link count
            return res[7]
        except:
            # any win32 failure: fall back to the stat() link count
            return os.stat(pathname).st_nlink

except ImportError:
    # no pywin32: os_link/nlinks simply stay undefined here
    pass
511 |
|
511 | |||
def is_exec(f, last):
    # This filesystem stores no executable bit; report whatever flag
    # the caller previously recorded for the file.
    return last
514 |
|
514 | |||
def set_exec(f, mode):
    # No-op: the executable bit cannot be stored on this platform.
    pass
517 |
|
517 | |||
def set_binary(fd):
    # Switch the C-runtime stream to binary mode so writes are not
    # subject to newline translation.
    # NOTE(review): relies on msvcrt being in scope (demandload'ed
    # elsewhere in this module) -- confirm.
    msvcrt.setmode(fd.fileno(), os.O_BINARY)
520 |
|
520 | |||
def pconvert(path):
    """Convert a local backslash path to canonical slash form."""
    return "/".join(path.split("\\"))
523 |
|
523 | |||
def localpath(path):
    """Convert a canonical slash path to the local backslash form."""
    return "\\".join(path.split("/"))
526 |
|
526 | |||
def normpath(path):
    # Normalize with the OS routine, then convert the backslashes it
    # produces back to canonical forward slashes.
    return pconvert(os.path.normpath(path))
529 |
|
529 | |||
# Symlinks are unavailable on this platform, so locking always uses
# the plain-file implementations defined elsewhere in this module.
makelock = _makelock_file
readlock = _readlock_file
532 |
|
532 | |||
def explain_exit(code):
    # Exit status carries no signal information here, so the raw code
    # serves as both the description value and the numeric result.
    return _("exited with status %d") % code, code
535 |
|
535 | |||
# NOTE(review): this `else:` pairs with a platform check above this
# hunk -- presumably `if os.name == 'nt'`; confirm in the full file.
else:
    # POSIX implementations of the same platform helpers follow.
    nulldev = '/dev/null'
538 |
|
538 | |||
def rcfiles(path):
    # Config files for a directory: its hgrc plus any *.rc files in
    # an optional hgrc.d subdirectory.
    rcs = [os.path.join(path, 'hgrc')]
    rcdir = os.path.join(path, 'hgrc.d')
    try:
        rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
                    if f.endswith(".rc")])
    except OSError, inst: pass  # hgrc.d missing/unreadable: fine
    return rcs
# Build the POSIX config search path: install-relative etc dir,
# system-wide /etc/mercurial, then the user's ~/.hgrc.
rcpath = []
# guard argv access; sys.argv can be empty when Python is embedded
if len(sys.argv) > 0:
    rcpath.extend(rcfiles(os.path.dirname(sys.argv[0]) + '/../etc/mercurial'))
rcpath.extend(rcfiles('/etc/mercurial'))
rcpath.append(os.path.expanduser('~/.hgrc'))
rcpath = [os.path.normpath(f) for f in rcpath]
553 |
|
553 | |||
def parse_patch_output(output_line):
    """parses the output produced by patch and returns the file name"""
    # drop the fixed "patching file " prefix
    name = output_line[14:]
    # patch only quotes names that contain whitespace; unwrap those
    if " " in name and name[:1] == "'" == name[-1:]:
        name = name[1:-1] # Remove the quotes
    return name
560 |
|
560 | |||
def is_exec(f, last):
    """check whether a file is executable"""
    # `last` (the previously recorded flag) is ignored: the owner
    # execute bit of the real file mode is authoritative on POSIX
    return (os.stat(f).st_mode & 0100 != 0)
564 |
|
564 | |||
def set_exec(f, mode):
    # Set (mode true) or clear (mode false) the execute bits of f.
    s = os.stat(f).st_mode
    if (s & 0100 != 0) == mode:
        # already in the requested state; avoid a needless chmod
        return
    if mode:
        # Turn on +x for every +r bit when making a file executable
        # and obey umask.
        umask = os.umask(0)
        os.umask(umask)  # restore at once; we only needed its value
        os.chmod(f, s | (s & 0444) >> 2 & ~umask)
    else:
        # drop all execute bits, keeping read/write permissions
        os.chmod(f, s & 0666)
577 |
|
577 | |||
def set_binary(fd):
    # POSIX streams perform no newline translation; nothing to do.
    pass
580 |
|
580 | |||
def pconvert(path):
    # POSIX paths already use forward slashes; identity conversion.
    return path
583 |
|
583 | |||
def localpath(path):
    # canonical and local path forms coincide on POSIX
    return path
586 |
|
586 | |||
# os.path.normpath already yields canonical slash paths on POSIX
normpath = os.path.normpath
588 |
|
588 | |||
def makelock(info, pathname):
    # Prefer an atomic symlink whose target carries the lock info.
    try:
        os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            # lock already held by someone else: propagate
            raise
        else:
            # e.g. filesystem without symlink support: fall back to
            # the plain-file lock defined elsewhere in this module
            _makelock_file(info, pathname)
597 |
|
597 | |||
def readlock(pathname):
    # Read lock info from a symlink, falling back to the plain-file
    # form when the path turns out not to be a symlink.
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno == errno.EINVAL:
            # not a symlink: the lock was written by _makelock_file
            return _readlock_file(pathname)
        else:
            raise
606 |
|
606 | |||
def explain_exit(code):
    """return a 2-tuple (desc, code) describing a process's status"""
    # `code` is a raw wait()-style status word, decoded with the
    # os.WIF* macros.
    if os.WIFEXITED(code):
        val = os.WEXITSTATUS(code)
        return _("exited with status %d") % val, val
    elif os.WIFSIGNALED(code):
        val = os.WTERMSIG(code)
        return _("killed by signal %d") % val, val
    elif os.WIFSTOPPED(code):
        val = os.WSTOPSIG(code)
        return _("stopped by signal %d") % val, val
    # none of the macros matched: the status word is malformed
    raise ValueError(_("invalid exit code"))
619 |
|
619 | |||
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter, targetsize = 2**16):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        self.in_iter = iter(in_iter)
        self.buf = ''
        self.targetsize = int(targetsize)
        if self.targetsize <= 0:
            raise ValueError(_("targetsize must be greater than 0, was %d") %
                             targetsize)
        # set once the source iterator has been fully consumed
        self.iterempty = False

    def fillbuf(self):
        """Ignore target size; read every chunk from iterator until empty."""
        if not self.iterempty:
            # NOTE(review): cStringIO is presumably brought into scope
            # by this module's demandload machinery -- confirm.
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            for ch in self.in_iter:
                collector.write(ch)
            self.buf = collector.getvalue()
            self.iterempty = True

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and not self.iterempty:
            # Clamp to a multiple of self.targetsize
            targetsize = self.targetsize * ((l // self.targetsize) + 1)
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            collected = len(self.buf)
            for chunk in self.in_iter:
                collector.write(chunk)
                collected += len(chunk)
                if collected >= targetsize:
                    break
            if collected < targetsize:
                # the iterator ran dry before reaching the target
                self.iterempty = True
            self.buf = collector.getvalue()
        # buffer() keeps the remainder as a zero-copy view (Python 2)
        s, self.buf = self.buf[:l], buffer(self.buf, l)
        return s
664 |
|
664 | |||
def filechunkiter(f, size = 65536):
    """Create a generator that produces all the data in the file size
    (default 65536) bytes at a time.  Chunks may be less than size
    bytes if the chunk is the last chunk in the file, or the file is a
    socket or some other type of file that sometimes reads less data
    than is requested."""
    while True:
        chunk = f.read(size)
        if not chunk:
            # end of stream reached
            break
        yield chunk
675 |
|
675 | |||
def makedate():
    """Return the current time as a (unixtime, tz-offset) pair."""
    now = time.localtime()
    # pick the DST offset only when DST is actually in effect
    if time.daylight and now.tm_isdst == 1:
        offset = time.altzone
    else:
        offset = time.timezone
    return time.mktime(now), offset
683 |
|
683 | |||
def datestr(date=None, format='%c'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC."""
    # fall back to the current moment when no date is supplied
    t, tz = date or makedate()
    # render the instant shifted into its own zone, then append the
    # +HHMM timezone suffix
    local = time.strftime(format, time.gmtime(float(t) - tz))
    hours = -tz / 3600
    minutes = (-tz % 3600) / 60
    return "%s %+03d%02d" % (local, hours, minutes)
General Comments 0
You need to be logged in to leave comments.
Login now