Add portable shell-quoting function; teach mq to use it.
Brendan Cully
r2791:f4d91635 default
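The hunk below contains the mq side of this change: in queue.patch(), the hand-rolled single quotes around repo.root and patchfile in the patch(1) command line are replaced with calls to util.shellquote. The quoting helper itself is added elsewhere in the changeset and is only referenced here; as a rough, hypothetical sketch (POSIX sh quoting only; the real helper would also need a Windows variant), such a function could look like:

    def shellquote(s):
        # Wrap the argument in single quotes; an embedded single quote is
        # handled by closing the quote, emitting an escaped quote, and
        # reopening the quoted string.
        return "'%s'" % s.replace("'", "'\\''")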
@@ -1,1685 +1,1685 @@
1
1
2 # queue.py - patch queues for mercurial
2 # queue.py - patch queues for mercurial
3 #
3 #
4 # Copyright 2005 Chris Mason <mason@suse.com>
4 # Copyright 2005 Chris Mason <mason@suse.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 '''patch management and development
9 '''patch management and development
10
10
11 This extension lets you work with a stack of patches in a Mercurial
11 This extension lets you work with a stack of patches in a Mercurial
12 repository. It manages two stacks of patches - all known patches, and
12 repository. It manages two stacks of patches - all known patches, and
13 applied patches (subset of known patches).
13 applied patches (subset of known patches).
14
14
15 Known patches are represented as patch files in the .hg/patches
15 Known patches are represented as patch files in the .hg/patches
16 directory. Applied patches are both patch files and changesets.
16 directory. Applied patches are both patch files and changesets.
17
17
18 Common tasks (use "hg help command" for more details):
18 Common tasks (use "hg help command" for more details):
19
19
20 prepare repository to work with patches qinit
20 prepare repository to work with patches qinit
21 create new patch qnew
21 create new patch qnew
22 import existing patch qimport
22 import existing patch qimport
23
23
24 print patch series qseries
24 print patch series qseries
25 print applied patches qapplied
25 print applied patches qapplied
26 print name of top applied patch qtop
26 print name of top applied patch qtop
27
27
28 add known patch to applied stack qpush
28 add known patch to applied stack qpush
29 remove patch from applied stack qpop
29 remove patch from applied stack qpop
30 refresh contents of top applied patch qrefresh
30 refresh contents of top applied patch qrefresh
31 '''
31 '''
32
32
33 from mercurial.demandload import *
33 from mercurial.demandload import *
34 demandload(globals(), "os sys re struct traceback errno bz2")
34 demandload(globals(), "os sys re struct traceback errno bz2")
35 from mercurial.i18n import gettext as _
35 from mercurial.i18n import gettext as _
36 from mercurial import ui, hg, revlog, commands, util
36 from mercurial import ui, hg, revlog, commands, util
37
37
38 versionstr = "0.45"
38 versionstr = "0.45"
39
39
40 commands.norepo += " qclone qversion"
40 commands.norepo += " qclone qversion"
41
41
42 class StatusEntry:
42 class StatusEntry:
43 def __init__(self, rev, name=None):
43 def __init__(self, rev, name=None):
44 if not name:
44 if not name:
45 self.rev, self.name = rev.split(':')
45 self.rev, self.name = rev.split(':')
46 else:
46 else:
47 self.rev, self.name = rev, name
47 self.rev, self.name = rev, name
48
48
49 def __str__(self):
49 def __str__(self):
50 return self.rev + ':' + self.name
50 return self.rev + ':' + self.name
51
51
52 class queue:
52 class queue:
53 def __init__(self, ui, path, patchdir=None):
53 def __init__(self, ui, path, patchdir=None):
54 self.basepath = path
54 self.basepath = path
55 if patchdir:
55 if patchdir:
56 self.path = patchdir
56 self.path = patchdir
57 else:
57 else:
58 self.path = os.path.join(path, "patches")
58 self.path = os.path.join(path, "patches")
59 self.opener = util.opener(self.path)
59 self.opener = util.opener(self.path)
60 self.ui = ui
60 self.ui = ui
61 self.applied = []
61 self.applied = []
62 self.full_series = []
62 self.full_series = []
63 self.applied_dirty = 0
63 self.applied_dirty = 0
64 self.series_dirty = 0
64 self.series_dirty = 0
65 self.series_path = "series"
65 self.series_path = "series"
66 self.status_path = "status"
66 self.status_path = "status"
67
67
68 if os.path.exists(os.path.join(self.path, self.series_path)):
68 if os.path.exists(os.path.join(self.path, self.series_path)):
69 self.full_series = self.opener(self.series_path).read().splitlines()
69 self.full_series = self.opener(self.series_path).read().splitlines()
70 self.parse_series()
70 self.parse_series()
71
71
72 if os.path.exists(os.path.join(self.path, self.status_path)):
72 if os.path.exists(os.path.join(self.path, self.status_path)):
73 self.applied = [StatusEntry(l)
73 self.applied = [StatusEntry(l)
74 for l in self.opener(self.status_path).read().splitlines()]
74 for l in self.opener(self.status_path).read().splitlines()]
75
75
76 def find_series(self, patch):
76 def find_series(self, patch):
77 pre = re.compile("(\s*)([^#]+)")
77 pre = re.compile("(\s*)([^#]+)")
78 index = 0
78 index = 0
79 for l in self.full_series:
79 for l in self.full_series:
80 m = pre.match(l)
80 m = pre.match(l)
81 if m:
81 if m:
82 s = m.group(2)
82 s = m.group(2)
83 s = s.rstrip()
83 s = s.rstrip()
84 if s == patch:
84 if s == patch:
85 return index
85 return index
86 index += 1
86 index += 1
87 return None
87 return None
88
88
89 def parse_series(self):
89 def parse_series(self):
90 self.series = []
90 self.series = []
91 for l in self.full_series:
91 for l in self.full_series:
92 s = l.split('#', 1)[0].strip()
92 s = l.split('#', 1)[0].strip()
93 if s:
93 if s:
94 self.series.append(s)
94 self.series.append(s)
95
95
96 def save_dirty(self):
96 def save_dirty(self):
97 def write_list(items, path):
97 def write_list(items, path):
98 fp = self.opener(path, 'w')
98 fp = self.opener(path, 'w')
99 for i in items:
99 for i in items:
100 print >> fp, i
100 print >> fp, i
101 fp.close()
101 fp.close()
102 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
102 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
103 if self.series_dirty: write_list(self.full_series, self.series_path)
103 if self.series_dirty: write_list(self.full_series, self.series_path)
104
104
105 def readheaders(self, patch):
105 def readheaders(self, patch):
106 def eatdiff(lines):
106 def eatdiff(lines):
107 while lines:
107 while lines:
108 l = lines[-1]
108 l = lines[-1]
109 if (l.startswith("diff -") or
109 if (l.startswith("diff -") or
110 l.startswith("Index:") or
110 l.startswith("Index:") or
111 l.startswith("===========")):
111 l.startswith("===========")):
112 del lines[-1]
112 del lines[-1]
113 else:
113 else:
114 break
114 break
115 def eatempty(lines):
115 def eatempty(lines):
116 while lines:
116 while lines:
117 l = lines[-1]
117 l = lines[-1]
118 if re.match('\s*$', l):
118 if re.match('\s*$', l):
119 del lines[-1]
119 del lines[-1]
120 else:
120 else:
121 break
121 break
122
122
123 pf = os.path.join(self.path, patch)
123 pf = os.path.join(self.path, patch)
124 message = []
124 message = []
125 comments = []
125 comments = []
126 user = None
126 user = None
127 date = None
127 date = None
128 format = None
128 format = None
129 subject = None
129 subject = None
130 diffstart = 0
130 diffstart = 0
131
131
132 for line in file(pf):
132 for line in file(pf):
133 line = line.rstrip()
133 line = line.rstrip()
134 if diffstart:
134 if diffstart:
135 if line.startswith('+++ '):
135 if line.startswith('+++ '):
136 diffstart = 2
136 diffstart = 2
137 break
137 break
138 if line.startswith("--- "):
138 if line.startswith("--- "):
139 diffstart = 1
139 diffstart = 1
140 continue
140 continue
141 elif format == "hgpatch":
141 elif format == "hgpatch":
142 # parse values when importing the result of an hg export
142 # parse values when importing the result of an hg export
143 if line.startswith("# User "):
143 if line.startswith("# User "):
144 user = line[7:]
144 user = line[7:]
145 elif line.startswith("# Date "):
145 elif line.startswith("# Date "):
146 date = line[7:]
146 date = line[7:]
147 elif not line.startswith("# ") and line:
147 elif not line.startswith("# ") and line:
148 message.append(line)
148 message.append(line)
149 format = None
149 format = None
150 elif line == '# HG changeset patch':
150 elif line == '# HG changeset patch':
151 format = "hgpatch"
151 format = "hgpatch"
152 elif (format != "tagdone" and (line.startswith("Subject: ") or
152 elif (format != "tagdone" and (line.startswith("Subject: ") or
153 line.startswith("subject: "))):
153 line.startswith("subject: "))):
154 subject = line[9:]
154 subject = line[9:]
155 format = "tag"
155 format = "tag"
156 elif (format != "tagdone" and (line.startswith("From: ") or
156 elif (format != "tagdone" and (line.startswith("From: ") or
157 line.startswith("from: "))):
157 line.startswith("from: "))):
158 user = line[6:]
158 user = line[6:]
159 format = "tag"
159 format = "tag"
160 elif format == "tag" and line == "":
160 elif format == "tag" and line == "":
161 # when looking for tags (subject: from: etc) they
161 # when looking for tags (subject: from: etc) they
162 # end once you find a blank line in the source
162 # end once you find a blank line in the source
163 format = "tagdone"
163 format = "tagdone"
164 elif message or line:
164 elif message or line:
165 message.append(line)
165 message.append(line)
166 comments.append(line)
166 comments.append(line)
167
167
168 eatdiff(message)
168 eatdiff(message)
169 eatdiff(comments)
169 eatdiff(comments)
170 eatempty(message)
170 eatempty(message)
171 eatempty(comments)
171 eatempty(comments)
172
172
173 # make sure message isn't empty
173 # make sure message isn't empty
174 if format and format.startswith("tag") and subject:
174 if format and format.startswith("tag") and subject:
175 message.insert(0, "")
175 message.insert(0, "")
176 message.insert(0, subject)
176 message.insert(0, subject)
177 return (message, comments, user, date, diffstart > 1)
177 return (message, comments, user, date, diffstart > 1)
178
178
179 def mergeone(self, repo, mergeq, head, patch, rev, wlock):
179 def mergeone(self, repo, mergeq, head, patch, rev, wlock):
180 # first try just applying the patch
180 # first try just applying the patch
181 (err, n) = self.apply(repo, [ patch ], update_status=False,
181 (err, n) = self.apply(repo, [ patch ], update_status=False,
182 strict=True, merge=rev, wlock=wlock)
182 strict=True, merge=rev, wlock=wlock)
183
183
184 if err == 0:
184 if err == 0:
185 return (err, n)
185 return (err, n)
186
186
187 if n is None:
187 if n is None:
188 raise util.Abort(_("apply failed for patch %s") % patch)
188 raise util.Abort(_("apply failed for patch %s") % patch)
189
189
190 self.ui.warn("patch didn't work out, merging %s\n" % patch)
190 self.ui.warn("patch didn't work out, merging %s\n" % patch)
191
191
192 # apply failed, strip away that rev and merge.
192 # apply failed, strip away that rev and merge.
193 repo.update(head, allow=False, force=True, wlock=wlock)
193 repo.update(head, allow=False, force=True, wlock=wlock)
194 self.strip(repo, n, update=False, backup='strip', wlock=wlock)
194 self.strip(repo, n, update=False, backup='strip', wlock=wlock)
195
195
196 c = repo.changelog.read(rev)
196 c = repo.changelog.read(rev)
197 ret = repo.update(rev, allow=True, wlock=wlock)
197 ret = repo.update(rev, allow=True, wlock=wlock)
198 if ret:
198 if ret:
199 raise util.Abort(_("update returned %d") % ret)
199 raise util.Abort(_("update returned %d") % ret)
200 n = repo.commit(None, c[4], c[1], force=1, wlock=wlock)
200 n = repo.commit(None, c[4], c[1], force=1, wlock=wlock)
201 if n == None:
201 if n == None:
202 raise util.Abort(_("repo commit failed"))
202 raise util.Abort(_("repo commit failed"))
203 try:
203 try:
204 message, comments, user, date, patchfound = mergeq.readheaders(patch)
204 message, comments, user, date, patchfound = mergeq.readheaders(patch)
205 except:
205 except:
206 raise util.Abort(_("unable to read %s") % patch)
206 raise util.Abort(_("unable to read %s") % patch)
207
207
208 patchf = self.opener(patch, "w")
208 patchf = self.opener(patch, "w")
209 if comments:
209 if comments:
210 comments = "\n".join(comments) + '\n\n'
210 comments = "\n".join(comments) + '\n\n'
211 patchf.write(comments)
211 patchf.write(comments)
212 commands.dodiff(patchf, self.ui, repo, head, n)
212 commands.dodiff(patchf, self.ui, repo, head, n)
213 patchf.close()
213 patchf.close()
214 return (0, n)
214 return (0, n)
215
215
216 def qparents(self, repo, rev=None):
216 def qparents(self, repo, rev=None):
217 if rev is None:
217 if rev is None:
218 (p1, p2) = repo.dirstate.parents()
218 (p1, p2) = repo.dirstate.parents()
219 if p2 == revlog.nullid:
219 if p2 == revlog.nullid:
220 return p1
220 return p1
221 if len(self.applied) == 0:
221 if len(self.applied) == 0:
222 return None
222 return None
223 return revlog.bin(self.applied[-1].rev)
223 return revlog.bin(self.applied[-1].rev)
224 pp = repo.changelog.parents(rev)
224 pp = repo.changelog.parents(rev)
225 if pp[1] != revlog.nullid:
225 if pp[1] != revlog.nullid:
226 arevs = [ x.rev for x in self.applied ]
226 arevs = [ x.rev for x in self.applied ]
227 p0 = revlog.hex(pp[0])
227 p0 = revlog.hex(pp[0])
228 p1 = revlog.hex(pp[1])
228 p1 = revlog.hex(pp[1])
229 if p0 in arevs:
229 if p0 in arevs:
230 return pp[0]
230 return pp[0]
231 if p1 in arevs:
231 if p1 in arevs:
232 return pp[1]
232 return pp[1]
233 return pp[0]
233 return pp[0]
234
234
235 def mergepatch(self, repo, mergeq, series, wlock):
235 def mergepatch(self, repo, mergeq, series, wlock):
236 if len(self.applied) == 0:
236 if len(self.applied) == 0:
237 # each of the patches merged in will have two parents. This
237 # each of the patches merged in will have two parents. This
238 # can confuse the qrefresh, qdiff, and strip code because it
238 # can confuse the qrefresh, qdiff, and strip code because it
239 # needs to know which parent is actually in the patch queue.
239 # needs to know which parent is actually in the patch queue.
240 # so, we insert a merge marker with only one parent. This way
240 # so, we insert a merge marker with only one parent. This way
241 # the first patch in the queue is never a merge patch
241 # the first patch in the queue is never a merge patch
242 #
242 #
243 pname = ".hg.patches.merge.marker"
243 pname = ".hg.patches.merge.marker"
244 n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
244 n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
245 wlock=wlock)
245 wlock=wlock)
246 self.applied.append(StatusEntry(revlog.hex(n), pname))
246 self.applied.append(StatusEntry(revlog.hex(n), pname))
247 self.applied_dirty = 1
247 self.applied_dirty = 1
248
248
249 head = self.qparents(repo)
249 head = self.qparents(repo)
250
250
251 for patch in series:
251 for patch in series:
252 patch = mergeq.lookup(patch, strict=True)
252 patch = mergeq.lookup(patch, strict=True)
253 if not patch:
253 if not patch:
254 self.ui.warn("patch %s does not exist\n" % patch)
254 self.ui.warn("patch %s does not exist\n" % patch)
255 return (1, None)
255 return (1, None)
256
256
257 info = mergeq.isapplied(patch)
257 info = mergeq.isapplied(patch)
258 if not info:
258 if not info:
259 self.ui.warn("patch %s is not applied\n" % patch)
259 self.ui.warn("patch %s is not applied\n" % patch)
260 return (1, None)
260 return (1, None)
261 rev = revlog.bin(info[1])
261 rev = revlog.bin(info[1])
262 (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
262 (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
263 if head:
263 if head:
264 self.applied.append(StatusEntry(revlog.hex(head), patch))
264 self.applied.append(StatusEntry(revlog.hex(head), patch))
265 self.applied_dirty = 1
265 self.applied_dirty = 1
266 if err:
266 if err:
267 return (err, head)
267 return (err, head)
268 return (0, head)
268 return (0, head)
269
269
270 def patch(self, repo, patchfile):
270 def patch(self, repo, patchfile):
271 '''Apply patchfile to the working directory.
271 '''Apply patchfile to the working directory.
272 patchfile: file name of patch'''
272 patchfile: file name of patch'''
273 try:
273 try:
274 pp = util.find_in_path('gpatch', os.environ.get('PATH', ''), 'patch')
274 pp = util.find_in_path('gpatch', os.environ.get('PATH', ''), 'patch')
275 f = os.popen("%s -d '%s' -p1 --no-backup-if-mismatch < '%s'" %
275 f = os.popen("%s -d %s -p1 --no-backup-if-mismatch < %s" %
276 (pp, repo.root, patchfile))
276 (pp, util.shellquote(repo.root), util.shellquote(patchfile)))
277 except:
277 except:
278 self.ui.warn("patch failed, unable to continue (try -v)\n")
278 self.ui.warn("patch failed, unable to continue (try -v)\n")
279 return (None, [], False)
279 return (None, [], False)
280 files = []
280 files = []
281 fuzz = False
281 fuzz = False
282 for l in f:
282 for l in f:
283 l = l.rstrip('\r\n');
283 l = l.rstrip('\r\n');
284 if self.ui.verbose:
284 if self.ui.verbose:
285 self.ui.warn(l + "\n")
285 self.ui.warn(l + "\n")
286 if l[:14] == 'patching file ':
286 if l[:14] == 'patching file ':
287 pf = os.path.normpath(l[14:])
287 pf = os.path.normpath(l[14:])
288 # when patch finds a space in the file name, it puts
288 # when patch finds a space in the file name, it puts
289 # single quotes around the filename. strip them off
289 # single quotes around the filename. strip them off
290 if pf[0] == "'" and pf[-1] == "'":
290 if pf[0] == "'" and pf[-1] == "'":
291 pf = pf[1:-1]
291 pf = pf[1:-1]
292 if pf not in files:
292 if pf not in files:
293 files.append(pf)
293 files.append(pf)
294 printed_file = False
294 printed_file = False
295 file_str = l
295 file_str = l
296 elif l.find('with fuzz') >= 0:
296 elif l.find('with fuzz') >= 0:
297 if not printed_file:
297 if not printed_file:
298 self.ui.warn(file_str + '\n')
298 self.ui.warn(file_str + '\n')
299 printed_file = True
299 printed_file = True
300 self.ui.warn(l + '\n')
300 self.ui.warn(l + '\n')
301 fuzz = True
301 fuzz = True
302 elif l.find('saving rejects to file') >= 0:
302 elif l.find('saving rejects to file') >= 0:
303 self.ui.warn(l + '\n')
303 self.ui.warn(l + '\n')
304 elif l.find('FAILED') >= 0:
304 elif l.find('FAILED') >= 0:
305 if not printed_file:
305 if not printed_file:
306 self.ui.warn(file_str + '\n')
306 self.ui.warn(file_str + '\n')
307 printed_file = True
307 printed_file = True
308 self.ui.warn(l + '\n')
308 self.ui.warn(l + '\n')
309
309
310 return (not f.close(), files, fuzz)
310 return (not f.close(), files, fuzz)
311
311
312 def apply(self, repo, series, list=False, update_status=True,
312 def apply(self, repo, series, list=False, update_status=True,
313 strict=False, patchdir=None, merge=None, wlock=None):
313 strict=False, patchdir=None, merge=None, wlock=None):
314 # TODO unify with commands.py
314 # TODO unify with commands.py
315 if not patchdir:
315 if not patchdir:
316 patchdir = self.path
316 patchdir = self.path
317 err = 0
317 err = 0
318 if not wlock:
318 if not wlock:
319 wlock = repo.wlock()
319 wlock = repo.wlock()
320 lock = repo.lock()
320 lock = repo.lock()
321 tr = repo.transaction()
321 tr = repo.transaction()
322 n = None
322 n = None
323 for patch in series:
323 for patch in series:
324 self.ui.warn("applying %s\n" % patch)
324 self.ui.warn("applying %s\n" % patch)
325 pf = os.path.join(patchdir, patch)
325 pf = os.path.join(patchdir, patch)
326
326
327 try:
327 try:
328 message, comments, user, date, patchfound = self.readheaders(patch)
328 message, comments, user, date, patchfound = self.readheaders(patch)
329 except:
329 except:
330 self.ui.warn("Unable to read %s\n" % pf)
330 self.ui.warn("Unable to read %s\n" % pf)
331 err = 1
331 err = 1
332 break
332 break
333
333
334 if not message:
334 if not message:
335 message = "imported patch %s\n" % patch
335 message = "imported patch %s\n" % patch
336 else:
336 else:
337 if list:
337 if list:
338 message.append("\nimported patch %s" % patch)
338 message.append("\nimported patch %s" % patch)
339 message = '\n'.join(message)
339 message = '\n'.join(message)
340
340
341 (patcherr, files, fuzz) = self.patch(repo, pf)
341 (patcherr, files, fuzz) = self.patch(repo, pf)
342 patcherr = not patcherr
342 patcherr = not patcherr
343
343
344 if merge and len(files) > 0:
344 if merge and len(files) > 0:
345 # Mark as merged and update dirstate parent info
345 # Mark as merged and update dirstate parent info
346 repo.dirstate.update(repo.dirstate.filterfiles(files), 'm')
346 repo.dirstate.update(repo.dirstate.filterfiles(files), 'm')
347 p1, p2 = repo.dirstate.parents()
347 p1, p2 = repo.dirstate.parents()
348 repo.dirstate.setparents(p1, merge)
348 repo.dirstate.setparents(p1, merge)
349 if len(files) > 0:
349 if len(files) > 0:
350 cwd = repo.getcwd()
350 cwd = repo.getcwd()
351 cfiles = files
351 cfiles = files
352 if cwd:
352 if cwd:
353 cfiles = [util.pathto(cwd, f) for f in files]
353 cfiles = [util.pathto(cwd, f) for f in files]
354 commands.addremove_lock(self.ui, repo, cfiles,
354 commands.addremove_lock(self.ui, repo, cfiles,
355 opts={}, wlock=wlock)
355 opts={}, wlock=wlock)
356 n = repo.commit(files, message, user, date, force=1, lock=lock,
356 n = repo.commit(files, message, user, date, force=1, lock=lock,
357 wlock=wlock)
357 wlock=wlock)
358
358
359 if n == None:
359 if n == None:
360 raise util.Abort(_("repo commit failed"))
360 raise util.Abort(_("repo commit failed"))
361
361
362 if update_status:
362 if update_status:
363 self.applied.append(StatusEntry(revlog.hex(n), patch))
363 self.applied.append(StatusEntry(revlog.hex(n), patch))
364
364
365 if patcherr:
365 if patcherr:
366 if not patchfound:
366 if not patchfound:
367 self.ui.warn("patch %s is empty\n" % patch)
367 self.ui.warn("patch %s is empty\n" % patch)
368 err = 0
368 err = 0
369 else:
369 else:
370 self.ui.warn("patch failed, rejects left in working dir\n")
370 self.ui.warn("patch failed, rejects left in working dir\n")
371 err = 1
371 err = 1
372 break
372 break
373
373
374 if fuzz and strict:
374 if fuzz and strict:
375 self.ui.warn("fuzz found when applying patch, stopping\n")
375 self.ui.warn("fuzz found when applying patch, stopping\n")
376 err = 1
376 err = 1
377 break
377 break
378 tr.close()
378 tr.close()
379 return (err, n)
379 return (err, n)
380
380
381 def delete(self, repo, patch, force=False):
381 def delete(self, repo, patch, force=False):
382 patch = self.lookup(patch, strict=True)
382 patch = self.lookup(patch, strict=True)
383 info = self.isapplied(patch)
383 info = self.isapplied(patch)
384 if info:
384 if info:
385 raise util.Abort(_("cannot delete applied patch %s") % patch)
385 raise util.Abort(_("cannot delete applied patch %s") % patch)
386 if patch not in self.series:
386 if patch not in self.series:
387 raise util.Abort(_("patch %s not in series file") % patch)
387 raise util.Abort(_("patch %s not in series file") % patch)
388 if force:
388 if force:
389 r = self.qrepo()
389 r = self.qrepo()
390 if r:
390 if r:
391 r.remove([patch], True)
391 r.remove([patch], True)
392 else:
392 else:
393 os.unlink(os.path.join(self.path, patch))
393 os.unlink(os.path.join(self.path, patch))
394 i = self.find_series(patch)
394 i = self.find_series(patch)
395 del self.full_series[i]
395 del self.full_series[i]
396 self.parse_series()
396 self.parse_series()
397 self.series_dirty = 1
397 self.series_dirty = 1
398
398
399 def check_toppatch(self, repo):
399 def check_toppatch(self, repo):
400 if len(self.applied) > 0:
400 if len(self.applied) > 0:
401 top = revlog.bin(self.applied[-1].rev)
401 top = revlog.bin(self.applied[-1].rev)
402 pp = repo.dirstate.parents()
402 pp = repo.dirstate.parents()
403 if top not in pp:
403 if top not in pp:
404 raise util.Abort(_("queue top not at same revision as working directory"))
404 raise util.Abort(_("queue top not at same revision as working directory"))
405 return top
405 return top
406 return None
406 return None
407 def check_localchanges(self, repo):
407 def check_localchanges(self, repo):
408 (c, a, r, d, u) = repo.changes(None, None)
408 (c, a, r, d, u) = repo.changes(None, None)
409 if c or a or d or r:
409 if c or a or d or r:
410 raise util.Abort(_("local changes found, refresh first"))
410 raise util.Abort(_("local changes found, refresh first"))
411 def new(self, repo, patch, msg=None, force=None):
411 def new(self, repo, patch, msg=None, force=None):
412 if os.path.exists(os.path.join(self.path, patch)):
412 if os.path.exists(os.path.join(self.path, patch)):
413 raise util.Abort(_('patch "%s" already exists') % patch)
413 raise util.Abort(_('patch "%s" already exists') % patch)
414 commitfiles = []
414 commitfiles = []
415 (c, a, r, d, u) = repo.changes(None, None)
415 (c, a, r, d, u) = repo.changes(None, None)
416 if c or a or d or r:
416 if c or a or d or r:
417 if not force:
417 if not force:
418 raise util.Abort(_("local changes found, refresh first"))
418 raise util.Abort(_("local changes found, refresh first"))
419 commitfiles = c + a + r
419 commitfiles = c + a + r
420 self.check_toppatch(repo)
420 self.check_toppatch(repo)
421 wlock = repo.wlock()
421 wlock = repo.wlock()
422 insert = self.full_series_end()
422 insert = self.full_series_end()
423 if msg:
423 if msg:
424 n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
424 n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
425 wlock=wlock)
425 wlock=wlock)
426 else:
426 else:
427 n = repo.commit(commitfiles,
427 n = repo.commit(commitfiles,
428 "New patch: %s" % patch, force=True, wlock=wlock)
428 "New patch: %s" % patch, force=True, wlock=wlock)
429 if n == None:
429 if n == None:
430 raise util.Abort(_("repo commit failed"))
430 raise util.Abort(_("repo commit failed"))
431 self.full_series[insert:insert] = [patch]
431 self.full_series[insert:insert] = [patch]
432 self.applied.append(StatusEntry(revlog.hex(n), patch))
432 self.applied.append(StatusEntry(revlog.hex(n), patch))
433 self.parse_series()
433 self.parse_series()
434 self.series_dirty = 1
434 self.series_dirty = 1
435 self.applied_dirty = 1
435 self.applied_dirty = 1
436 p = self.opener(patch, "w")
436 p = self.opener(patch, "w")
437 if msg:
437 if msg:
438 msg = msg + "\n"
438 msg = msg + "\n"
439 p.write(msg)
439 p.write(msg)
440 p.close()
440 p.close()
441 wlock = None
441 wlock = None
442 r = self.qrepo()
442 r = self.qrepo()
443 if r: r.add([patch])
443 if r: r.add([patch])
444 if commitfiles:
444 if commitfiles:
445 self.refresh(repo, msg=None, short=True)
445 self.refresh(repo, msg=None, short=True)
446
446
447 def strip(self, repo, rev, update=True, backup="all", wlock=None):
447 def strip(self, repo, rev, update=True, backup="all", wlock=None):
448 def limitheads(chlog, stop):
448 def limitheads(chlog, stop):
449 """return the list of all nodes that have no children"""
449 """return the list of all nodes that have no children"""
450 p = {}
450 p = {}
451 h = []
451 h = []
452 stoprev = 0
452 stoprev = 0
453 if stop in chlog.nodemap:
453 if stop in chlog.nodemap:
454 stoprev = chlog.rev(stop)
454 stoprev = chlog.rev(stop)
455
455
456 for r in range(chlog.count() - 1, -1, -1):
456 for r in range(chlog.count() - 1, -1, -1):
457 n = chlog.node(r)
457 n = chlog.node(r)
458 if n not in p:
458 if n not in p:
459 h.append(n)
459 h.append(n)
460 if n == stop:
460 if n == stop:
461 break
461 break
462 if r < stoprev:
462 if r < stoprev:
463 break
463 break
464 for pn in chlog.parents(n):
464 for pn in chlog.parents(n):
465 p[pn] = 1
465 p[pn] = 1
466 return h
466 return h
467
467
468 def bundle(cg):
468 def bundle(cg):
469 backupdir = repo.join("strip-backup")
469 backupdir = repo.join("strip-backup")
470 if not os.path.isdir(backupdir):
470 if not os.path.isdir(backupdir):
471 os.mkdir(backupdir)
471 os.mkdir(backupdir)
472 name = os.path.join(backupdir, "%s" % revlog.short(rev))
472 name = os.path.join(backupdir, "%s" % revlog.short(rev))
473 name = savename(name)
473 name = savename(name)
474 self.ui.warn("saving bundle to %s\n" % name)
474 self.ui.warn("saving bundle to %s\n" % name)
475 # TODO, exclusive open
475 # TODO, exclusive open
476 f = open(name, "wb")
476 f = open(name, "wb")
477 try:
477 try:
478 f.write("HG10")
478 f.write("HG10")
479 z = bz2.BZ2Compressor(9)
479 z = bz2.BZ2Compressor(9)
480 while 1:
480 while 1:
481 chunk = cg.read(4096)
481 chunk = cg.read(4096)
482 if not chunk:
482 if not chunk:
483 break
483 break
484 f.write(z.compress(chunk))
484 f.write(z.compress(chunk))
485 f.write(z.flush())
485 f.write(z.flush())
486 except:
486 except:
487 os.unlink(name)
487 os.unlink(name)
488 raise
488 raise
489 f.close()
489 f.close()
490 return name
490 return name
491
491
492 def stripall(rev, revnum):
492 def stripall(rev, revnum):
493 cl = repo.changelog
493 cl = repo.changelog
494 c = cl.read(rev)
494 c = cl.read(rev)
495 mm = repo.manifest.read(c[0])
495 mm = repo.manifest.read(c[0])
496 seen = {}
496 seen = {}
497
497
498 for x in xrange(revnum, cl.count()):
498 for x in xrange(revnum, cl.count()):
499 c = cl.read(cl.node(x))
499 c = cl.read(cl.node(x))
500 for f in c[3]:
500 for f in c[3]:
501 if f in seen:
501 if f in seen:
502 continue
502 continue
503 seen[f] = 1
503 seen[f] = 1
504 if f in mm:
504 if f in mm:
505 filerev = mm[f]
505 filerev = mm[f]
506 else:
506 else:
507 filerev = 0
507 filerev = 0
508 seen[f] = filerev
508 seen[f] = filerev
509 # we go in two steps here so the strip loop happens in a
509 # we go in two steps here so the strip loop happens in a
510 # sensible order. When stripping many files, this helps keep
510 # sensible order. When stripping many files, this helps keep
511 # our disk access patterns under control.
511 # our disk access patterns under control.
512 list = seen.keys()
512 list = seen.keys()
513 list.sort()
513 list.sort()
514 for f in list:
514 for f in list:
515 ff = repo.file(f)
515 ff = repo.file(f)
516 filerev = seen[f]
516 filerev = seen[f]
517 if filerev != 0:
517 if filerev != 0:
518 if filerev in ff.nodemap:
518 if filerev in ff.nodemap:
519 filerev = ff.rev(filerev)
519 filerev = ff.rev(filerev)
520 else:
520 else:
521 filerev = 0
521 filerev = 0
522 ff.strip(filerev, revnum)
522 ff.strip(filerev, revnum)
523
523
524 if not wlock:
524 if not wlock:
525 wlock = repo.wlock()
525 wlock = repo.wlock()
526 lock = repo.lock()
526 lock = repo.lock()
527 chlog = repo.changelog
527 chlog = repo.changelog
528 # TODO delete the undo files, and handle undo of merge sets
528 # TODO delete the undo files, and handle undo of merge sets
529 pp = chlog.parents(rev)
529 pp = chlog.parents(rev)
530 revnum = chlog.rev(rev)
530 revnum = chlog.rev(rev)
531
531
532 if update:
532 if update:
533 (c, a, r, d, u) = repo.changes(None, None)
533 (c, a, r, d, u) = repo.changes(None, None)
534 if c or a or d or r:
534 if c or a or d or r:
535 raise util.Abort(_("local changes found"))
535 raise util.Abort(_("local changes found"))
536 urev = self.qparents(repo, rev)
536 urev = self.qparents(repo, rev)
537 repo.update(urev, allow=False, force=True, wlock=wlock)
537 repo.update(urev, allow=False, force=True, wlock=wlock)
538 repo.dirstate.write()
538 repo.dirstate.write()
539
539
540 # save is a list of all the branches we are truncating away
540 # save is a list of all the branches we are truncating away
541 # that we actually want to keep. changegroup will be used
541 # that we actually want to keep. changegroup will be used
542 # to preserve them and add them back after the truncate
542 # to preserve them and add them back after the truncate
543 saveheads = []
543 saveheads = []
544 savebases = {}
544 savebases = {}
545
545
546 tip = chlog.tip()
546 tip = chlog.tip()
547 heads = limitheads(chlog, rev)
547 heads = limitheads(chlog, rev)
548 seen = {}
548 seen = {}
549
549
550 # search through all the heads, finding those where the revision
550 # search through all the heads, finding those where the revision
551 # we want to strip away is an ancestor. Also look for merges
551 # we want to strip away is an ancestor. Also look for merges
552 # that might be turned into new heads by the strip.
552 # that might be turned into new heads by the strip.
553 while heads:
553 while heads:
554 h = heads.pop()
554 h = heads.pop()
555 n = h
555 n = h
556 while True:
556 while True:
557 seen[n] = 1
557 seen[n] = 1
558 pp = chlog.parents(n)
558 pp = chlog.parents(n)
559 if pp[1] != revlog.nullid and chlog.rev(pp[1]) > revnum:
559 if pp[1] != revlog.nullid and chlog.rev(pp[1]) > revnum:
560 if pp[1] not in seen:
560 if pp[1] not in seen:
561 heads.append(pp[1])
561 heads.append(pp[1])
562 if pp[0] == revlog.nullid:
562 if pp[0] == revlog.nullid:
563 break
563 break
564 if chlog.rev(pp[0]) < revnum:
564 if chlog.rev(pp[0]) < revnum:
565 break
565 break
566 n = pp[0]
566 n = pp[0]
567 if n == rev:
567 if n == rev:
568 break
568 break
569 r = chlog.reachable(h, rev)
569 r = chlog.reachable(h, rev)
570 if rev not in r:
570 if rev not in r:
571 saveheads.append(h)
571 saveheads.append(h)
572 for x in r:
572 for x in r:
573 if chlog.rev(x) > revnum:
573 if chlog.rev(x) > revnum:
574 savebases[x] = 1
574 savebases[x] = 1
575
575
576 # create a changegroup for all the branches we need to keep
576 # create a changegroup for all the branches we need to keep
577 if backup is "all":
577 if backup is "all":
578 backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
578 backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
579 bundle(backupch)
579 bundle(backupch)
580 if saveheads:
580 if saveheads:
581 backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
581 backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
582 chgrpfile = bundle(backupch)
582 chgrpfile = bundle(backupch)
583
583
584 stripall(rev, revnum)
584 stripall(rev, revnum)
585
585
586 change = chlog.read(rev)
586 change = chlog.read(rev)
587 repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
587 repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
588 chlog.strip(revnum, revnum)
588 chlog.strip(revnum, revnum)
589 if saveheads:
589 if saveheads:
590 self.ui.status("adding branch\n")
590 self.ui.status("adding branch\n")
591 commands.unbundle(self.ui, repo, chgrpfile, update=False)
591 commands.unbundle(self.ui, repo, chgrpfile, update=False)
592 if backup is not "strip":
592 if backup is not "strip":
593 os.unlink(chgrpfile)
593 os.unlink(chgrpfile)
594
594
595 def isapplied(self, patch):
595 def isapplied(self, patch):
596 """returns (index, rev, patch)"""
596 """returns (index, rev, patch)"""
597 for i in xrange(len(self.applied)):
597 for i in xrange(len(self.applied)):
598 a = self.applied[i]
598 a = self.applied[i]
599 if a.name == patch:
599 if a.name == patch:
600 return (i, a.rev, a.name)
600 return (i, a.rev, a.name)
601 return None
601 return None
602
602
603 # if the exact patch name does not exist, we try a few
603 # if the exact patch name does not exist, we try a few
604 # variations. If strict is passed, we try only #1
604 # variations. If strict is passed, we try only #1
605 #
605 #
606 # 1) a number to indicate an offset in the series file
606 # 1) a number to indicate an offset in the series file
607 # 2) a unique substring of the patch name was given
607 # 2) a unique substring of the patch name was given
608 # 3) patchname[-+]num to indicate an offset in the series file
608 # 3) patchname[-+]num to indicate an offset in the series file
609 def lookup(self, patch, strict=False):
609 def lookup(self, patch, strict=False):
610 def partial_name(s):
610 def partial_name(s):
611 if s in self.series:
611 if s in self.series:
612 return s
612 return s
613 matches = [x for x in self.series if s in x]
613 matches = [x for x in self.series if s in x]
614 if len(matches) > 1:
614 if len(matches) > 1:
615 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
615 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
616 for m in matches:
616 for m in matches:
617 self.ui.warn(' %s\n' % m)
617 self.ui.warn(' %s\n' % m)
618 return None
618 return None
619 if matches:
619 if matches:
620 return matches[0]
620 return matches[0]
621 if len(self.series) > 0 and len(self.applied) > 0:
621 if len(self.series) > 0 and len(self.applied) > 0:
622 if s == 'qtip':
622 if s == 'qtip':
623 return self.series[self.series_end()-1]
623 return self.series[self.series_end()-1]
624 if s == 'qbase':
624 if s == 'qbase':
625 return self.series[0]
625 return self.series[0]
626 return None
626 return None
627 if patch == None:
627 if patch == None:
628 return None
628 return None
629
629
630 # we don't want to return a partial match until we make
630 # we don't want to return a partial match until we make
631 # sure the file name passed in does not exist (checked below)
631 # sure the file name passed in does not exist (checked below)
632 res = partial_name(patch)
632 res = partial_name(patch)
633 if res and res == patch:
633 if res and res == patch:
634 return res
634 return res
635
635
636 if not os.path.isfile(os.path.join(self.path, patch)):
636 if not os.path.isfile(os.path.join(self.path, patch)):
637 try:
637 try:
638 sno = int(patch)
638 sno = int(patch)
639 except(ValueError, OverflowError):
639 except(ValueError, OverflowError):
640 pass
640 pass
641 else:
641 else:
642 if sno < len(self.series):
642 if sno < len(self.series):
643 patch = self.series[sno]
643 patch = self.series[sno]
644 return patch
644 return patch
645 if not strict:
645 if not strict:
646 # return any partial match made above
646 # return any partial match made above
647 if res:
647 if res:
648 return res
648 return res
649 minus = patch.rsplit('-', 1)
649 minus = patch.rsplit('-', 1)
650 if len(minus) > 1:
650 if len(minus) > 1:
651 res = partial_name(minus[0])
651 res = partial_name(minus[0])
652 if res:
652 if res:
653 i = self.series.index(res)
653 i = self.series.index(res)
654 try:
654 try:
655 off = int(minus[1] or 1)
655 off = int(minus[1] or 1)
656 except(ValueError, OverflowError):
656 except(ValueError, OverflowError):
657 pass
657 pass
658 else:
658 else:
659 if i - off >= 0:
659 if i - off >= 0:
660 return self.series[i - off]
660 return self.series[i - off]
661 plus = patch.rsplit('+', 1)
661 plus = patch.rsplit('+', 1)
662 if len(plus) > 1:
662 if len(plus) > 1:
663 res = partial_name(plus[0])
663 res = partial_name(plus[0])
664 if res:
664 if res:
665 i = self.series.index(res)
665 i = self.series.index(res)
666 try:
666 try:
667 off = int(plus[1] or 1)
667 off = int(plus[1] or 1)
668 except(ValueError, OverflowError):
668 except(ValueError, OverflowError):
669 pass
669 pass
670 else:
670 else:
671 if i + off < len(self.series):
671 if i + off < len(self.series):
672 return self.series[i + off]
672 return self.series[i + off]
673 raise util.Abort(_("patch %s not in series") % patch)
673 raise util.Abort(_("patch %s not in series") % patch)
674
674
675 def push(self, repo, patch=None, force=False, list=False,
675 def push(self, repo, patch=None, force=False, list=False,
676 mergeq=None, wlock=None):
676 mergeq=None, wlock=None):
677 if not wlock:
677 if not wlock:
678 wlock = repo.wlock()
678 wlock = repo.wlock()
679 patch = self.lookup(patch)
679 patch = self.lookup(patch)
680 if patch and self.isapplied(patch):
680 if patch and self.isapplied(patch):
681 self.ui.warn(_("patch %s is already applied\n") % patch)
681 self.ui.warn(_("patch %s is already applied\n") % patch)
682 sys.exit(1)
682 sys.exit(1)
683 if self.series_end() == len(self.series):
683 if self.series_end() == len(self.series):
684 self.ui.warn(_("patch series fully applied\n"))
684 self.ui.warn(_("patch series fully applied\n"))
685 sys.exit(1)
685 sys.exit(1)
686 if not force:
686 if not force:
687 self.check_localchanges(repo)
687 self.check_localchanges(repo)
688
688
689 self.applied_dirty = 1;
689 self.applied_dirty = 1;
690 start = self.series_end()
690 start = self.series_end()
691 if start > 0:
691 if start > 0:
692 self.check_toppatch(repo)
692 self.check_toppatch(repo)
693 if not patch:
693 if not patch:
694 patch = self.series[start]
694 patch = self.series[start]
695 end = start + 1
695 end = start + 1
696 else:
696 else:
697 end = self.series.index(patch, start) + 1
697 end = self.series.index(patch, start) + 1
698 s = self.series[start:end]
698 s = self.series[start:end]
699 if mergeq:
699 if mergeq:
700 ret = self.mergepatch(repo, mergeq, s, wlock)
700 ret = self.mergepatch(repo, mergeq, s, wlock)
701 else:
701 else:
702 ret = self.apply(repo, s, list, wlock=wlock)
702 ret = self.apply(repo, s, list, wlock=wlock)
703 top = self.applied[-1].name
703 top = self.applied[-1].name
704 if ret[0]:
704 if ret[0]:
705 self.ui.write("Errors during apply, please fix and refresh %s\n" %
705 self.ui.write("Errors during apply, please fix and refresh %s\n" %
706 top)
706 top)
707 else:
707 else:
708 self.ui.write("Now at: %s\n" % top)
708 self.ui.write("Now at: %s\n" % top)
709 return ret[0]
709 return ret[0]
710
710
711 def pop(self, repo, patch=None, force=False, update=True, all=False,
711 def pop(self, repo, patch=None, force=False, update=True, all=False,
712 wlock=None):
712 wlock=None):
713 def getfile(f, rev):
713 def getfile(f, rev):
714 t = repo.file(f).read(rev)
714 t = repo.file(f).read(rev)
715 try:
715 try:
716 repo.wfile(f, "w").write(t)
716 repo.wfile(f, "w").write(t)
717 except IOError:
717 except IOError:
718 try:
718 try:
719 os.makedirs(os.path.dirname(repo.wjoin(f)))
719 os.makedirs(os.path.dirname(repo.wjoin(f)))
720 except OSError, err:
720 except OSError, err:
721 if err.errno != errno.EEXIST: raise
721 if err.errno != errno.EEXIST: raise
722 repo.wfile(f, "w").write(t)
722 repo.wfile(f, "w").write(t)
723
723
724 if not wlock:
724 if not wlock:
725 wlock = repo.wlock()
725 wlock = repo.wlock()
726 if patch:
726 if patch:
727 # index, rev, patch
727 # index, rev, patch
728 info = self.isapplied(patch)
728 info = self.isapplied(patch)
729 if not info:
729 if not info:
730 patch = self.lookup(patch)
730 patch = self.lookup(patch)
731 info = self.isapplied(patch)
731 info = self.isapplied(patch)
732 if not info:
732 if not info:
733 raise util.Abort(_("patch %s is not applied") % patch)
733 raise util.Abort(_("patch %s is not applied") % patch)
734 if len(self.applied) == 0:
734 if len(self.applied) == 0:
735 self.ui.warn(_("no patches applied\n"))
735 self.ui.warn(_("no patches applied\n"))
736 sys.exit(1)
736 sys.exit(1)
737
737
738 if not update:
738 if not update:
739 parents = repo.dirstate.parents()
739 parents = repo.dirstate.parents()
740 rr = [ revlog.bin(x.rev) for x in self.applied ]
740 rr = [ revlog.bin(x.rev) for x in self.applied ]
741 for p in parents:
741 for p in parents:
742 if p in rr:
742 if p in rr:
743 self.ui.warn("qpop: forcing dirstate update\n")
743 self.ui.warn("qpop: forcing dirstate update\n")
744 update = True
744 update = True
745
745
746 if not force and update:
746 if not force and update:
747 self.check_localchanges(repo)
747 self.check_localchanges(repo)
748
748
749 self.applied_dirty = 1;
749 self.applied_dirty = 1;
750 end = len(self.applied)
750 end = len(self.applied)
751 if not patch:
751 if not patch:
752 if all:
752 if all:
753 popi = 0
753 popi = 0
754 else:
754 else:
755 popi = len(self.applied) - 1
755 popi = len(self.applied) - 1
756 else:
756 else:
757 popi = info[0] + 1
757 popi = info[0] + 1
758 if popi >= end:
758 if popi >= end:
759 self.ui.warn("qpop: %s is already at the top\n" % patch)
759 self.ui.warn("qpop: %s is already at the top\n" % patch)
760 return
760 return
761 info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
761 info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
762
762
763 start = info[0]
763 start = info[0]
764 rev = revlog.bin(info[1])
764 rev = revlog.bin(info[1])
765
765
766 # we know there are no local changes, so we can make a simplified
766 # we know there are no local changes, so we can make a simplified
767 # form of hg.update.
767 # form of hg.update.
768 if update:
768 if update:
769 top = self.check_toppatch(repo)
769 top = self.check_toppatch(repo)
770 qp = self.qparents(repo, rev)
770 qp = self.qparents(repo, rev)
771 changes = repo.changelog.read(qp)
771 changes = repo.changelog.read(qp)
772 mf1 = repo.manifest.readflags(changes[0])
772 mf1 = repo.manifest.readflags(changes[0])
773 mmap = repo.manifest.read(changes[0])
773 mmap = repo.manifest.read(changes[0])
774 (c, a, r, d, u) = repo.changes(qp, top)
774 (c, a, r, d, u) = repo.changes(qp, top)
775 if d:
775 if d:
776 raise util.Abort("deletions found between repo revs")
776 raise util.Abort("deletions found between repo revs")
777 for f in c:
777 for f in c:
778 getfile(f, mmap[f])
778 getfile(f, mmap[f])
779 for f in r:
779 for f in r:
780 getfile(f, mmap[f])
780 getfile(f, mmap[f])
781 util.set_exec(repo.wjoin(f), mf1[f])
781 util.set_exec(repo.wjoin(f), mf1[f])
782 repo.dirstate.update(c + r, 'n')
782 repo.dirstate.update(c + r, 'n')
783 for f in a:
783 for f in a:
784 try: os.unlink(repo.wjoin(f))
784 try: os.unlink(repo.wjoin(f))
785 except: raise
785 except: raise
786 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
786 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
787 except: pass
787 except: pass
788 if a:
788 if a:
789 repo.dirstate.forget(a)
789 repo.dirstate.forget(a)
790 repo.dirstate.setparents(qp, revlog.nullid)
790 repo.dirstate.setparents(qp, revlog.nullid)
791 self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
791 self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
792 del self.applied[start:end]
792 del self.applied[start:end]
793 if len(self.applied):
793 if len(self.applied):
794 self.ui.write("Now at: %s\n" % self.applied[-1].name)
794 self.ui.write("Now at: %s\n" % self.applied[-1].name)
795 else:
795 else:
796 self.ui.write("Patch queue now empty\n")
796 self.ui.write("Patch queue now empty\n")
797
797
798 def diff(self, repo, files):
798 def diff(self, repo, files):
799 top = self.check_toppatch(repo)
799 top = self.check_toppatch(repo)
800 if not top:
800 if not top:
801 self.ui.write("No patches applied\n")
801 self.ui.write("No patches applied\n")
802 return
802 return
803 qp = self.qparents(repo, top)
803 qp = self.qparents(repo, top)
804 commands.dodiff(sys.stdout, self.ui, repo, qp, None, files)
804 commands.dodiff(sys.stdout, self.ui, repo, qp, None, files)
805
805
806 def refresh(self, repo, msg=None, short=False):
806 def refresh(self, repo, msg=None, short=False):
807 if len(self.applied) == 0:
807 if len(self.applied) == 0:
808 self.ui.write("No patches applied\n")
808 self.ui.write("No patches applied\n")
809 return
809 return
810 wlock = repo.wlock()
810 wlock = repo.wlock()
811 self.check_toppatch(repo)
811 self.check_toppatch(repo)
812 qp = self.qparents(repo)
812 qp = self.qparents(repo)
813 (top, patch) = (self.applied[-1].rev, self.applied[-1].name)
813 (top, patch) = (self.applied[-1].rev, self.applied[-1].name)
814 top = revlog.bin(top)
814 top = revlog.bin(top)
815 cparents = repo.changelog.parents(top)
815 cparents = repo.changelog.parents(top)
816 patchparent = self.qparents(repo, top)
816 patchparent = self.qparents(repo, top)
817 message, comments, user, date, patchfound = self.readheaders(patch)
817 message, comments, user, date, patchfound = self.readheaders(patch)
818
818
819 patchf = self.opener(patch, "w")
819 patchf = self.opener(patch, "w")
820 msg = msg.rstrip()
820 msg = msg.rstrip()
821 if msg:
821 if msg:
822 if comments:
822 if comments:
823 # Remove existing message.
823 # Remove existing message.
824 ci = 0
824 ci = 0
825 for mi in range(len(message)):
825 for mi in range(len(message)):
826 while message[mi] != comments[ci]:
826 while message[mi] != comments[ci]:
827 ci += 1
827 ci += 1
828 del comments[ci]
828 del comments[ci]
829 comments.append(msg)
829 comments.append(msg)
830 if comments:
830 if comments:
831 comments = "\n".join(comments) + '\n\n'
831 comments = "\n".join(comments) + '\n\n'
832 patchf.write(comments)
832 patchf.write(comments)
833
833
834 tip = repo.changelog.tip()
834 tip = repo.changelog.tip()
835 if top == tip:
835 if top == tip:
836 # if the top of our patch queue is also the tip, there is an
836 # if the top of our patch queue is also the tip, there is an
837 # optimization here. We update the dirstate in place and strip
837 # optimization here. We update the dirstate in place and strip
838 # off the tip commit. Then just commit the current directory
838 # off the tip commit. Then just commit the current directory
839 # tree. We can also send repo.commit the list of files
839 # tree. We can also send repo.commit the list of files
840 # changed to speed up the diff
840 # changed to speed up the diff
841 #
841 #
842 # in short mode, we only diff the files included in the
842 # in short mode, we only diff the files included in the
843 # patch already
843 # patch already
844 #
844 #
845 # this should really read:
845 # this should really read:
846 #(cc, dd, aa, aa2, uu) = repo.changes(tip, patchparent)
846 #(cc, dd, aa, aa2, uu) = repo.changes(tip, patchparent)
847 # but we do it backwards to take advantage of manifest/chlog
847 # but we do it backwards to take advantage of manifest/chlog
848 # caching against the next repo.changes call
848 # caching against the next repo.changes call
849 #
849 #
850 (cc, aa, dd, aa2, uu) = repo.changes(patchparent, tip)
850 (cc, aa, dd, aa2, uu) = repo.changes(patchparent, tip)
851 if short:
851 if short:
852 filelist = cc + aa + dd
852 filelist = cc + aa + dd
853 else:
853 else:
854 filelist = None
854 filelist = None
855 (c, a, r, d, u) = repo.changes(None, None, filelist)
855 (c, a, r, d, u) = repo.changes(None, None, filelist)
856
856
857 # we might end up with files that were added between tip and
857 # we might end up with files that were added between tip and
858 # the dirstate parent, but then changed in the local dirstate.
858 # the dirstate parent, but then changed in the local dirstate.
859 # in this case, we want them to only show up in the added section
859 # in this case, we want them to only show up in the added section
860 for x in c:
860 for x in c:
861 if x not in aa:
861 if x not in aa:
862 cc.append(x)
862 cc.append(x)
863 # we might end up with files added by the local dirstate that
863 # we might end up with files added by the local dirstate that
864 # were deleted by the patch. In this case, they should only
864 # were deleted by the patch. In this case, they should only
865 # show up in the changed section.
865 # show up in the changed section.
866 for x in a:
866 for x in a:
867 if x in dd:
867 if x in dd:
868 del dd[dd.index(x)]
868 del dd[dd.index(x)]
869 cc.append(x)
869 cc.append(x)
870 else:
870 else:
871 aa.append(x)
871 aa.append(x)
872 # make sure any files deleted in the local dirstate
872 # make sure any files deleted in the local dirstate
873 # are not in the add or change column of the patch
873 # are not in the add or change column of the patch
874 forget = []
874 forget = []
875 for x in d + r:
875 for x in d + r:
876 if x in aa:
876 if x in aa:
877 del aa[aa.index(x)]
877 del aa[aa.index(x)]
878 forget.append(x)
878 forget.append(x)
879 continue
879 continue
880 elif x in cc:
880 elif x in cc:
881 del cc[cc.index(x)]
881 del cc[cc.index(x)]
882 dd.append(x)
882 dd.append(x)
883
883
884 c = list(util.unique(cc))
884 c = list(util.unique(cc))
885 r = list(util.unique(dd))
885 r = list(util.unique(dd))
886 a = list(util.unique(aa))
886 a = list(util.unique(aa))
887 filelist = list(util.unique(c + r + a ))
887 filelist = list(util.unique(c + r + a ))
888 commands.dodiff(patchf, self.ui, repo, patchparent, None,
888 commands.dodiff(patchf, self.ui, repo, patchparent, None,
889 filelist, changes=(c, a, r, [], u))
889 filelist, changes=(c, a, r, [], u))
890 patchf.close()
890 patchf.close()
891
891
892 changes = repo.changelog.read(tip)
892 changes = repo.changelog.read(tip)
893 repo.dirstate.setparents(*cparents)
893 repo.dirstate.setparents(*cparents)
894 repo.dirstate.update(a, 'a')
894 repo.dirstate.update(a, 'a')
895 repo.dirstate.update(r, 'r')
895 repo.dirstate.update(r, 'r')
896 repo.dirstate.update(c, 'n')
896 repo.dirstate.update(c, 'n')
897 repo.dirstate.forget(forget)
897 repo.dirstate.forget(forget)
898
898
899 if not msg:
899 if not msg:
900 if not message:
900 if not message:
901 message = "patch queue: %s\n" % patch
901 message = "patch queue: %s\n" % patch
902 else:
902 else:
903 message = "\n".join(message)
903 message = "\n".join(message)
904 else:
904 else:
905 message = msg
905 message = msg
906
906
907 self.strip(repo, top, update=False, backup='strip', wlock=wlock)
907 self.strip(repo, top, update=False, backup='strip', wlock=wlock)
908 n = repo.commit(filelist, message, changes[1], force=1, wlock=wlock)
908 n = repo.commit(filelist, message, changes[1], force=1, wlock=wlock)
909 self.applied[-1] = StatusEntry(revlog.hex(n), patch)
909 self.applied[-1] = StatusEntry(revlog.hex(n), patch)
910 self.applied_dirty = 1
910 self.applied_dirty = 1
911 else:
911 else:
912 commands.dodiff(patchf, self.ui, repo, patchparent, None)
912 commands.dodiff(patchf, self.ui, repo, patchparent, None)
913 patchf.close()
913 patchf.close()
914 self.pop(repo, force=True, wlock=wlock)
914 self.pop(repo, force=True, wlock=wlock)
915 self.push(repo, force=True, wlock=wlock)
915 self.push(repo, force=True, wlock=wlock)
916
916
917 def init(self, repo, create=False):
917 def init(self, repo, create=False):
918 if os.path.isdir(self.path):
918 if os.path.isdir(self.path):
919 raise util.Abort(_("patch queue directory already exists"))
919 raise util.Abort(_("patch queue directory already exists"))
920 os.mkdir(self.path)
920 os.mkdir(self.path)
921 if create:
921 if create:
922 return self.qrepo(create=True)
922 return self.qrepo(create=True)
923
923
924 def unapplied(self, repo, patch=None):
924 def unapplied(self, repo, patch=None):
925 if patch and patch not in self.series:
925 if patch and patch not in self.series:
926 raise util.Abort(_("patch %s is not in series file") % patch)
926 raise util.Abort(_("patch %s is not in series file") % patch)
927 if not patch:
927 if not patch:
928 start = self.series_end()
928 start = self.series_end()
929 else:
929 else:
930 start = self.series.index(patch) + 1
930 start = self.series.index(patch) + 1
931 return [(i, self.series[i]) for i in xrange(start, len(self.series))]
931 return [(i, self.series[i]) for i in xrange(start, len(self.series))]
932
932
933 def qseries(self, repo, missing=None, summary=False):
933 def qseries(self, repo, missing=None, summary=False):
934 start = self.series_end()
934 start = self.series_end()
935 if not missing:
935 if not missing:
936 for i in range(len(self.series)):
936 for i in range(len(self.series)):
937 patch = self.series[i]
937 patch = self.series[i]
938 if self.ui.verbose:
938 if self.ui.verbose:
939 if i < start:
939 if i < start:
940 status = 'A'
940 status = 'A'
941 else:
941 else:
942 status = 'U'
942 status = 'U'
943 self.ui.write('%d %s ' % (i, status))
943 self.ui.write('%d %s ' % (i, status))
944 if summary:
944 if summary:
945 msg = self.readheaders(patch)[0]
945 msg = self.readheaders(patch)[0]
946 msg = msg and ': ' + msg[0] or ': '
946 msg = msg and ': ' + msg[0] or ': '
947 else:
947 else:
948 msg = ''
948 msg = ''
949 self.ui.write('%s%s\n' % (patch, msg))
949 self.ui.write('%s%s\n' % (patch, msg))
950 else:
950 else:
951 list = []
951 list = []
952 for root, dirs, files in os.walk(self.path):
952 for root, dirs, files in os.walk(self.path):
953 d = root[len(self.path) + 1:]
953 d = root[len(self.path) + 1:]
954 for f in files:
954 for f in files:
955 fl = os.path.join(d, f)
955 fl = os.path.join(d, f)
956 if (fl not in self.series and
956 if (fl not in self.series and
957 fl not in (self.status_path, self.series_path)
957 fl not in (self.status_path, self.series_path)
958 and not fl.startswith('.')):
958 and not fl.startswith('.')):
959 list.append(fl)
959 list.append(fl)
960 list.sort()
960 list.sort()
961 if list:
961 if list:
962 for x in list:
962 for x in list:
963 if self.ui.verbose:
963 if self.ui.verbose:
964 self.ui.write("D ")
964 self.ui.write("D ")
965 self.ui.write("%s\n" % x)
965 self.ui.write("%s\n" % x)
966
966
967 def issaveline(self, l):
967 def issaveline(self, l):
968 name = l.split(':')[1]
968 name = l.split(':')[1]
969 if name == '.hg.patches.save.line':
969 if name == '.hg.patches.save.line':
970 return True
970 return True
971
971
972 def qrepo(self, create=False):
972 def qrepo(self, create=False):
973 if create or os.path.isdir(os.path.join(self.path, ".hg")):
973 if create or os.path.isdir(os.path.join(self.path, ".hg")):
974 return hg.repository(self.ui, path=self.path, create=create)
974 return hg.repository(self.ui, path=self.path, create=create)
975
975
976 def restore(self, repo, rev, delete=None, qupdate=None):
976 def restore(self, repo, rev, delete=None, qupdate=None):
977 c = repo.changelog.read(rev)
977 c = repo.changelog.read(rev)
978 desc = c[4].strip()
978 desc = c[4].strip()
979 lines = desc.splitlines()
979 lines = desc.splitlines()
980 i = 0
980 i = 0
981 datastart = None
981 datastart = None
982 series = []
982 series = []
983 applied = []
983 applied = []
984 qpp = None
984 qpp = None
985 for i in xrange(0, len(lines)):
985 for i in xrange(0, len(lines)):
986 if lines[i] == 'Patch Data:':
986 if lines[i] == 'Patch Data:':
987 datastart = i + 1
987 datastart = i + 1
988 elif lines[i].startswith('Dirstate:'):
988 elif lines[i].startswith('Dirstate:'):
989 l = lines[i].rstrip()
989 l = lines[i].rstrip()
990 l = l[10:].split(' ')
990 l = l[10:].split(' ')
991 qpp = [ hg.bin(x) for x in l ]
991 qpp = [ hg.bin(x) for x in l ]
992 elif datastart != None:
992 elif datastart != None:
993 l = lines[i].rstrip()
993 l = lines[i].rstrip()
994 se = StatusEntry(l)
994 se = StatusEntry(l)
995 id = se.rev
995 id = se.rev
996 file = se.name
996 file = se.name
997 if id:
997 if id:
998 applied.append(se)
998 applied.append(se)
999 series.append(file)
999 series.append(file)
1000 if datastart == None:
1000 if datastart == None:
1001 self.ui.warn("No saved patch data found\n")
1001 self.ui.warn("No saved patch data found\n")
1002 return 1
1002 return 1
1003 self.ui.warn("restoring status: %s\n" % lines[0])
1003 self.ui.warn("restoring status: %s\n" % lines[0])
1004 self.full_series = series
1004 self.full_series = series
1005 self.applied = applied
1005 self.applied = applied
1006 self.parse_series()
1006 self.parse_series()
1007 self.series_dirty = 1
1007 self.series_dirty = 1
1008 self.applied_dirty = 1
1008 self.applied_dirty = 1
1009 heads = repo.changelog.heads()
1009 heads = repo.changelog.heads()
1010 if delete:
1010 if delete:
1011 if rev not in heads:
1011 if rev not in heads:
1012 self.ui.warn("save entry has children, leaving it alone\n")
1012 self.ui.warn("save entry has children, leaving it alone\n")
1013 else:
1013 else:
1014 self.ui.warn("removing save entry %s\n" % hg.short(rev))
1014 self.ui.warn("removing save entry %s\n" % hg.short(rev))
1015 pp = repo.dirstate.parents()
1015 pp = repo.dirstate.parents()
1016 if rev in pp:
1016 if rev in pp:
1017 update = True
1017 update = True
1018 else:
1018 else:
1019 update = False
1019 update = False
1020 self.strip(repo, rev, update=update, backup='strip')
1020 self.strip(repo, rev, update=update, backup='strip')
1021 if qpp:
1021 if qpp:
1022 self.ui.warn("saved queue repository parents: %s %s\n" %
1022 self.ui.warn("saved queue repository parents: %s %s\n" %
1023 (hg.short(qpp[0]), hg.short(qpp[1])))
1023 (hg.short(qpp[0]), hg.short(qpp[1])))
1024 if qupdate:
1024 if qupdate:
1025 print "queue directory updating"
1025 print "queue directory updating"
1026 r = self.qrepo()
1026 r = self.qrepo()
1027 if not r:
1027 if not r:
1028 self.ui.warn("Unable to load queue repository\n")
1028 self.ui.warn("Unable to load queue repository\n")
1029 return 1
1029 return 1
1030 r.update(qpp[0], allow=False, force=True)
1030 r.update(qpp[0], allow=False, force=True)
1031
1031
1032 def save(self, repo, msg=None):
1032 def save(self, repo, msg=None):
1033 if len(self.applied) == 0:
1033 if len(self.applied) == 0:
1034 self.ui.warn("save: no patches applied, exiting\n")
1034 self.ui.warn("save: no patches applied, exiting\n")
1035 return 1
1035 return 1
1036 if self.issaveline(self.applied[-1]):
1036 if self.issaveline(self.applied[-1]):
1037 self.ui.warn("status is already saved\n")
1037 self.ui.warn("status is already saved\n")
1038 return 1
1038 return 1
1039
1039
1040 ar = [ ':' + x for x in self.full_series ]
1040 ar = [ ':' + x for x in self.full_series ]
1041 if not msg:
1041 if not msg:
1042 msg = "hg patches saved state"
1042 msg = "hg patches saved state"
1043 else:
1043 else:
1044 msg = "hg patches: " + msg.rstrip('\r\n')
1044 msg = "hg patches: " + msg.rstrip('\r\n')
1045 r = self.qrepo()
1045 r = self.qrepo()
1046 if r:
1046 if r:
1047 pp = r.dirstate.parents()
1047 pp = r.dirstate.parents()
1048 msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
1048 msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
1049 msg += "\n\nPatch Data:\n"
1049 msg += "\n\nPatch Data:\n"
1050 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and "\n".join(ar)
1050 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and "\n".join(ar)
1051 + '\n' or "")
1051 + '\n' or "")
1052 n = repo.commit(None, text, user=None, force=1)
1052 n = repo.commit(None, text, user=None, force=1)
1053 if not n:
1053 if not n:
1054 self.ui.warn("repo commit failed\n")
1054 self.ui.warn("repo commit failed\n")
1055 return 1
1055 return 1
1056 self.applied.append(StatusEntry(revlog.hex(n),'.hg.patches.save.line'))
1056 self.applied.append(StatusEntry(revlog.hex(n),'.hg.patches.save.line'))
1057 self.applied_dirty = 1
1057 self.applied_dirty = 1
1058
1058
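    # For orientation: restore() above parses the changeset description that
    # save() builds here. The layout is roughly (an illustrative sketch, not
    # copied from a real repository):
    #
    #   hg patches saved state
    #   Dirstate: <hex parent1> <hex parent2>
    #
    #   Patch Data:
    #   <hex rev>:applied-patch-name
    #   :unapplied-patch-name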
1059 def full_series_end(self):
1059 def full_series_end(self):
1060 if len(self.applied) > 0:
1060 if len(self.applied) > 0:
1061 p = self.applied[-1].name
1061 p = self.applied[-1].name
1062 end = self.find_series(p)
1062 end = self.find_series(p)
1063 if end == None:
1063 if end == None:
1064 return len(self.full_series)
1064 return len(self.full_series)
1065 return end + 1
1065 return end + 1
1066 return 0
1066 return 0
1067
1067
1068 def series_end(self):
1068 def series_end(self):
1069 end = 0
1069 end = 0
1070 if len(self.applied) > 0:
1070 if len(self.applied) > 0:
1071 p = self.applied[-1].name
1071 p = self.applied[-1].name
1072 try:
1072 try:
1073 end = self.series.index(p)
1073 end = self.series.index(p)
1074 except ValueError:
1074 except ValueError:
1075 return 0
1075 return 0
1076 return end + 1
1076 return end + 1
1077 return end
1077 return end
1078
1078
1079 def qapplied(self, repo, patch=None):
1079 def qapplied(self, repo, patch=None):
1080 if patch and patch not in self.series:
1080 if patch and patch not in self.series:
1081 raise util.Abort(_("patch %s is not in series file") % patch)
1081 raise util.Abort(_("patch %s is not in series file") % patch)
1082 if not patch:
1082 if not patch:
1083 end = len(self.applied)
1083 end = len(self.applied)
1084 else:
1084 else:
1085 end = self.series.index(patch) + 1
1085 end = self.series.index(patch) + 1
1086 for x in xrange(end):
1086 for x in xrange(end):
1087 p = self.appliedname(x)
1087 p = self.appliedname(x)
1088 self.ui.write("%s\n" % p)
1088 self.ui.write("%s\n" % p)
1089
1089
1090 def appliedname(self, index):
1090 def appliedname(self, index):
1091 pname = self.applied[index].name
1091 pname = self.applied[index].name
1092 if not self.ui.verbose:
1092 if not self.ui.verbose:
1093 p = pname
1093 p = pname
1094 else:
1094 else:
1095 p = str(self.series.index(pname)) + " " + pname
1095 p = str(self.series.index(pname)) + " " + pname
1096 return p
1096 return p
1097
1097
1098 def top(self, repo):
1098 def top(self, repo):
1099 if len(self.applied):
1099 if len(self.applied):
1100 p = self.appliedname(-1)
1100 p = self.appliedname(-1)
1101 self.ui.write(p + '\n')
1101 self.ui.write(p + '\n')
1102 else:
1102 else:
1103 self.ui.write("No patches applied\n")
1103 self.ui.write("No patches applied\n")
1104
1104
1105 def next(self, repo):
1105 def next(self, repo):
1106 end = self.series_end()
1106 end = self.series_end()
1107 if end == len(self.series):
1107 if end == len(self.series):
1108 self.ui.write("All patches applied\n")
1108 self.ui.write("All patches applied\n")
1109 else:
1109 else:
1110 p = self.series[end]
1110 p = self.series[end]
1111 if self.ui.verbose:
1111 if self.ui.verbose:
1112 self.ui.write("%d " % self.series.index(p))
1112 self.ui.write("%d " % self.series.index(p))
1113 self.ui.write(p + '\n')
1113 self.ui.write(p + '\n')
1114
1114
1115 def prev(self, repo):
1115 def prev(self, repo):
1116 if len(self.applied) > 1:
1116 if len(self.applied) > 1:
1117 p = self.appliedname(-2)
1117 p = self.appliedname(-2)
1118 self.ui.write(p + '\n')
1118 self.ui.write(p + '\n')
1119 elif len(self.applied) == 1:
1119 elif len(self.applied) == 1:
1120 self.ui.write("Only one patch applied\n")
1120 self.ui.write("Only one patch applied\n")
1121 else:
1121 else:
1122 self.ui.write("No patches applied\n")
1122 self.ui.write("No patches applied\n")
1123
1123
1124 def qimport(self, repo, files, patch=None, existing=None, force=None):
1124 def qimport(self, repo, files, patch=None, existing=None, force=None):
1125 if len(files) > 1 and patch:
1125 if len(files) > 1 and patch:
1126 raise util.Abort(_('option "-n" not valid when importing multiple '
1126 raise util.Abort(_('option "-n" not valid when importing multiple '
1127 'files'))
1127 'files'))
1128 i = 0
1128 i = 0
1129 added = []
1129 added = []
1130 for filename in files:
1130 for filename in files:
1131 if existing:
1131 if existing:
1132 if not patch:
1132 if not patch:
1133 patch = filename
1133 patch = filename
1134 if not os.path.isfile(os.path.join(self.path, patch)):
1134 if not os.path.isfile(os.path.join(self.path, patch)):
1135 raise util.Abort(_("patch %s does not exist") % patch)
1135 raise util.Abort(_("patch %s does not exist") % patch)
1136 else:
1136 else:
1137 try:
1137 try:
1138 text = file(filename).read()
1138 text = file(filename).read()
1139 except IOError:
1139 except IOError:
1140 raise util.Abort(_("unable to read %s") % filename)
1140 raise util.Abort(_("unable to read %s") % filename)
1141 if not patch:
1141 if not patch:
1142 patch = os.path.split(filename)[1]
1142 patch = os.path.split(filename)[1]
1143 if not force and os.path.exists(os.path.join(self.path, patch)):
1143 if not force and os.path.exists(os.path.join(self.path, patch)):
1144 raise util.Abort(_('patch "%s" already exists') % patch)
1144 raise util.Abort(_('patch "%s" already exists') % patch)
1145 patchf = self.opener(patch, "w")
1145 patchf = self.opener(patch, "w")
1146 patchf.write(text)
1146 patchf.write(text)
1147 if patch in self.series:
1147 if patch in self.series:
1148 raise util.Abort(_('patch %s is already in the series file')
1148 raise util.Abort(_('patch %s is already in the series file')
1149 % patch)
1149 % patch)
1150 index = self.full_series_end() + i
1150 index = self.full_series_end() + i
1151 self.full_series[index:index] = [patch]
1151 self.full_series[index:index] = [patch]
1152 self.parse_series()
1152 self.parse_series()
1153 self.ui.warn("adding %s to series file\n" % patch)
1153 self.ui.warn("adding %s to series file\n" % patch)
1154 i += 1
1154 i += 1
1155 added.append(patch)
1155 added.append(patch)
1156 patch = None
1156 patch = None
1157 self.series_dirty = 1
1157 self.series_dirty = 1
1158 qrepo = self.qrepo()
1158 qrepo = self.qrepo()
1159 if qrepo:
1159 if qrepo:
1160 qrepo.add(added)
1160 qrepo.add(added)
1161
1161
1162 def delete(ui, repo, patch, **opts):
1162 def delete(ui, repo, patch, **opts):
1163 """remove a patch from the series file
1163 """remove a patch from the series file
1164
1164
1165 The patch must not be applied.
1165 The patch must not be applied.
1166 With -f, deletes the patch file as well as the series entry."""
1166 With -f, deletes the patch file as well as the series entry."""
1167 q = repo.mq
1167 q = repo.mq
1168 q.delete(repo, patch, force=opts.get('force'))
1168 q.delete(repo, patch, force=opts.get('force'))
1169 q.save_dirty()
1169 q.save_dirty()
1170 return 0
1170 return 0
1171
1171
1172 def applied(ui, repo, patch=None, **opts):
1172 def applied(ui, repo, patch=None, **opts):
1173 """print the patches already applied"""
1173 """print the patches already applied"""
1174 repo.mq.qapplied(repo, patch)
1174 repo.mq.qapplied(repo, patch)
1175 return 0
1175 return 0
1176
1176
1177 def unapplied(ui, repo, patch=None, **opts):
1177 def unapplied(ui, repo, patch=None, **opts):
1178 """print the patches not yet applied"""
1178 """print the patches not yet applied"""
1179 for i, p in repo.mq.unapplied(repo, patch):
1179 for i, p in repo.mq.unapplied(repo, patch):
1180 if ui.verbose:
1180 if ui.verbose:
1181 ui.write("%d " % i)
1181 ui.write("%d " % i)
1182 ui.write("%s\n" % p)
1182 ui.write("%s\n" % p)
1183
1183
1184 def qimport(ui, repo, *filename, **opts):
1184 def qimport(ui, repo, *filename, **opts):
1185 """import a patch"""
1185 """import a patch"""
1186 q = repo.mq
1186 q = repo.mq
1187 q.qimport(repo, filename, patch=opts['name'],
1187 q.qimport(repo, filename, patch=opts['name'],
1188 existing=opts['existing'], force=opts['force'])
1188 existing=opts['existing'], force=opts['force'])
1189 q.save_dirty()
1189 q.save_dirty()
1190 return 0
1190 return 0
1191
1191
1192 def init(ui, repo, **opts):
1192 def init(ui, repo, **opts):
1193 """init a new queue repository
1193 """init a new queue repository
1194
1194
1195 The queue repository is unversioned by default. If -c is
1195 The queue repository is unversioned by default. If -c is
1196 specified, qinit will create a separate nested repository
1196 specified, qinit will create a separate nested repository
1197 for patches. Use qcommit to commit changes to this queue
1197 for patches. Use qcommit to commit changes to this queue
1198 repository."""
1198 repository."""
1199 q = repo.mq
1199 q = repo.mq
1200 r = q.init(repo, create=opts['create_repo'])
1200 r = q.init(repo, create=opts['create_repo'])
1201 q.save_dirty()
1201 q.save_dirty()
1202 if r:
1202 if r:
1203 fp = r.wopener('.hgignore', 'w')
1203 fp = r.wopener('.hgignore', 'w')
1204 print >> fp, 'syntax: glob'
1204 print >> fp, 'syntax: glob'
1205 print >> fp, 'status'
1205 print >> fp, 'status'
1206 fp.close()
1206 fp.close()
1207 r.wopener('series', 'w').close()
1207 r.wopener('series', 'w').close()
1208 r.add(['.hgignore', 'series'])
1208 r.add(['.hgignore', 'series'])
1209 return 0
1209 return 0
1210
1210
1211 def clone(ui, source, dest=None, **opts):
1211 def clone(ui, source, dest=None, **opts):
1212 '''clone main and patch repository at same time
1212 '''clone main and patch repository at same time
1213
1213
1214 If source is local, destination will have no patches applied. If
1214 If source is local, destination will have no patches applied. If
1215 source is remote, this command cannot check whether patches are
1215 source is remote, this command cannot check whether patches are
1216 applied in source, so it cannot guarantee that patches are not
1216 applied in source, so it cannot guarantee that patches are not
1217 applied in destination. If you clone a remote repository, make sure
1217 applied in destination. If you clone a remote repository, make sure
1218 it has no patches applied before you do so.
1218 it has no patches applied before you do so.
1219
1219
1220 Source patch repository is looked for in <src>/.hg/patches by
1220 Source patch repository is looked for in <src>/.hg/patches by
1221 default. Use -p <url> to change.
1221 default. Use -p <url> to change.
1222 '''
1222 '''
1223 commands.setremoteconfig(ui, opts)
1223 commands.setremoteconfig(ui, opts)
1224 if dest is None:
1224 if dest is None:
1225 dest = hg.defaultdest(source)
1225 dest = hg.defaultdest(source)
1226 sr = hg.repository(ui, ui.expandpath(source))
1226 sr = hg.repository(ui, ui.expandpath(source))
1227 qbase, destrev = None, None
1227 qbase, destrev = None, None
1228 if sr.local():
1228 if sr.local():
1229 reposetup(ui, sr)
1229 reposetup(ui, sr)
1230 if sr.mq.applied:
1230 if sr.mq.applied:
1231 qbase = revlog.bin(sr.mq.applied[0].rev)
1231 qbase = revlog.bin(sr.mq.applied[0].rev)
1232 if not hg.islocal(dest):
1232 if not hg.islocal(dest):
1233 destrev = sr.parents(qbase)[0]
1233 destrev = sr.parents(qbase)[0]
1234 ui.note(_('cloning main repo\n'))
1234 ui.note(_('cloning main repo\n'))
1235 sr, dr = hg.clone(ui, sr, dest,
1235 sr, dr = hg.clone(ui, sr, dest,
1236 pull=opts['pull'],
1236 pull=opts['pull'],
1237 rev=destrev,
1237 rev=destrev,
1238 update=False,
1238 update=False,
1239 stream=opts['uncompressed'])
1239 stream=opts['uncompressed'])
1240 ui.note(_('cloning patch repo\n'))
1240 ui.note(_('cloning patch repo\n'))
1241 spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
1241 spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
1242 dr.url() + '/.hg/patches',
1242 dr.url() + '/.hg/patches',
1243 pull=opts['pull'],
1243 pull=opts['pull'],
1244 update=not opts['noupdate'],
1244 update=not opts['noupdate'],
1245 stream=opts['uncompressed'])
1245 stream=opts['uncompressed'])
1246 if dr.local():
1246 if dr.local():
1247 if qbase:
1247 if qbase:
1248 ui.note(_('stripping applied patches from destination repo\n'))
1248 ui.note(_('stripping applied patches from destination repo\n'))
1249 reposetup(ui, dr)
1249 reposetup(ui, dr)
1250 dr.mq.strip(dr, qbase, update=False, backup=None)
1250 dr.mq.strip(dr, qbase, update=False, backup=None)
1251 if not opts['noupdate']:
1251 if not opts['noupdate']:
1252 ui.note(_('updating destination repo\n'))
1252 ui.note(_('updating destination repo\n'))
1253 dr.update(dr.changelog.tip())
1253 dr.update(dr.changelog.tip())
1254
1254
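# Illustrative use (hypothetical paths): 'hg qclone src dst' clones src to
# dst and src/.hg/patches to dst/.hg/patches in one step; pass -p <url> when
# the source patch repository lives somewhere other than <src>/.hg/patches.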
1255 def commit(ui, repo, *pats, **opts):
1255 def commit(ui, repo, *pats, **opts):
1256 """commit changes in the queue repository"""
1256 """commit changes in the queue repository"""
1257 q = repo.mq
1257 q = repo.mq
1258 r = q.qrepo()
1258 r = q.qrepo()
1259 if not r: raise util.Abort('no queue repository')
1259 if not r: raise util.Abort('no queue repository')
1260 commands.commit(r.ui, r, *pats, **opts)
1260 commands.commit(r.ui, r, *pats, **opts)
1261
1261
1262 def series(ui, repo, **opts):
1262 def series(ui, repo, **opts):
1263 """print the entire series file"""
1263 """print the entire series file"""
1264 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1264 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1265 return 0
1265 return 0
1266
1266
1267 def top(ui, repo, **opts):
1267 def top(ui, repo, **opts):
1268 """print the name of the current patch"""
1268 """print the name of the current patch"""
1269 repo.mq.top(repo)
1269 repo.mq.top(repo)
1270 return 0
1270 return 0
1271
1271
1272 def next(ui, repo, **opts):
1272 def next(ui, repo, **opts):
1273 """print the name of the next patch"""
1273 """print the name of the next patch"""
1274 repo.mq.next(repo)
1274 repo.mq.next(repo)
1275 return 0
1275 return 0
1276
1276
1277 def prev(ui, repo, **opts):
1277 def prev(ui, repo, **opts):
1278 """print the name of the previous patch"""
1278 """print the name of the previous patch"""
1279 repo.mq.prev(repo)
1279 repo.mq.prev(repo)
1280 return 0
1280 return 0
1281
1281
1282 def new(ui, repo, patch, **opts):
1282 def new(ui, repo, patch, **opts):
1283 """create a new patch
1283 """create a new patch
1284
1284
1285 qnew creates a new patch on top of the currently-applied patch
1285 qnew creates a new patch on top of the currently-applied patch
1286 (if any). It will refuse to run if there are any outstanding
1286 (if any). It will refuse to run if there are any outstanding
1287 changes unless -f is specified, in which case the patch will
1287 changes unless -f is specified, in which case the patch will
1288 be initialised with them.
1288 be initialised with them.
1289
1289
1290 -m or -l set the patch header as well as the commit message.
1290 -m or -l set the patch header as well as the commit message.
1291 If neither is specified, the patch header is empty and the
1291 If neither is specified, the patch header is empty and the
1292 commit message is 'New patch: PATCH'"""
1292 commit message is 'New patch: PATCH'"""
1293 q = repo.mq
1293 q = repo.mq
1294 message=commands.logmessage(**opts)
1294 message=commands.logmessage(**opts)
1295 q.new(repo, patch, msg=message, force=opts['force'])
1295 q.new(repo, patch, msg=message, force=opts['force'])
1296 q.save_dirty()
1296 q.save_dirty()
1297 return 0
1297 return 0
1298
1298
1299 def refresh(ui, repo, **opts):
1299 def refresh(ui, repo, **opts):
1300 """update the current patch"""
1300 """update the current patch"""
1301 q = repo.mq
1301 q = repo.mq
1302 message=commands.logmessage(**opts)
1302 message=commands.logmessage(**opts)
1303 if opts['edit']:
1303 if opts['edit']:
1304 if message:
1304 if message:
1305 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1305 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1306 patch = q.applied[-1].name
1306 patch = q.applied[-1].name
1307 (message, comment, user, date, hasdiff) = q.readheaders(patch)
1307 (message, comment, user, date, hasdiff) = q.readheaders(patch)
1308 message = ui.edit('\n'.join(message), user or ui.username())
1308 message = ui.edit('\n'.join(message), user or ui.username())
1309 q.refresh(repo, msg=message, short=opts['short'])
1309 q.refresh(repo, msg=message, short=opts['short'])
1310 q.save_dirty()
1310 q.save_dirty()
1311 return 0
1311 return 0
1312
1312
1313 def diff(ui, repo, *files, **opts):
1313 def diff(ui, repo, *files, **opts):
1314 """diff of the current patch"""
1314 """diff of the current patch"""
1315 # deep in the dirstate code, the walkhelper method wants a list, not a tuple
1315 # deep in the dirstate code, the walkhelper method wants a list, not a tuple
1316 repo.mq.diff(repo, list(files))
1316 repo.mq.diff(repo, list(files))
1317 return 0
1317 return 0
1318
1318
1319 def fold(ui, repo, *files, **opts):
1319 def fold(ui, repo, *files, **opts):
1320 """fold the named patches into the current patch
1320 """fold the named patches into the current patch
1321
1321
1322 Patches must not yet be applied. Each patch will be successively
1322 Patches must not yet be applied. Each patch will be successively
1323 applied to the current patch in the order given. If all the
1323 applied to the current patch in the order given. If all the
1324 patches apply successfully, the current patch will be refreshed
1324 patches apply successfully, the current patch will be refreshed
1325 with the new cumulative patch, and the folded patches will
1325 with the new cumulative patch, and the folded patches will
1326 be deleted. With -f/--force, the folded patch files will
1326 be deleted. With -f/--force, the folded patch files will
1327 be removed afterwards.
1327 be removed afterwards.
1328
1328
1329 The header for each folded patch will be concatenated with
1329 The header for each folded patch will be concatenated with
1330 the current patch header, separated by a line of '* * *'."""
1330 the current patch header, separated by a line of '* * *'."""
1331
1331
1332 q = repo.mq
1332 q = repo.mq
1333
1333
1334 if not files:
1334 if not files:
1335 raise util.Abort(_('qfold requires at least one patch name'))
1335 raise util.Abort(_('qfold requires at least one patch name'))
1336 if not q.check_toppatch(repo):
1336 if not q.check_toppatch(repo):
1337 raise util.Abort(_('No patches applied\n'))
1337 raise util.Abort(_('No patches applied\n'))
1338
1338
1339 message=commands.logmessage(**opts)
1339 message=commands.logmessage(**opts)
1340 if opts['edit']:
1340 if opts['edit']:
1341 if message:
1341 if message:
1342 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1342 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1343
1343
1344 parent = q.lookup('qtip')
1344 parent = q.lookup('qtip')
1345 patches = []
1345 patches = []
1346 messages = []
1346 messages = []
1347 for f in files:
1347 for f in files:
1348 patch = q.lookup(f)
1348 patch = q.lookup(f)
1349 if patch in patches or patch == parent:
1349 if patch in patches or patch == parent:
1350 ui.warn(_('Skipping already folded patch %s\n') % patch)
1350 ui.warn(_('Skipping already folded patch %s\n') % patch)
1351 if q.isapplied(patch):
1351 if q.isapplied(patch):
1352 raise util.Abort(_('qfold cannot fold already applied patch %s') % patch)
1352 raise util.Abort(_('qfold cannot fold already applied patch %s') % patch)
1353 patches.append(patch)
1353 patches.append(patch)
1354
1354
1355 for patch in patches:
1355 for patch in patches:
1356 if not message:
1356 if not message:
1357 messages.append(q.readheaders(patch)[0])
1357 messages.append(q.readheaders(patch)[0])
1358 pf = os.path.join(q.path, patch)
1358 pf = os.path.join(q.path, patch)
1359 (patchsuccess, files, fuzz) = q.patch(repo, pf)
1359 (patchsuccess, files, fuzz) = q.patch(repo, pf)
1360 if not patchsuccess:
1360 if not patchsuccess:
1361 raise util.Abort(_('Error folding patch %s') % patch)
1361 raise util.Abort(_('Error folding patch %s') % patch)
1362
1362
1363 if not message:
1363 if not message:
1364 message, comments, user = q.readheaders(parent)[0:3]
1364 message, comments, user = q.readheaders(parent)[0:3]
1365 for msg in messages:
1365 for msg in messages:
1366 message.append('* * *')
1366 message.append('* * *')
1367 message.extend(msg)
1367 message.extend(msg)
1368 message = '\n'.join(message)
1368 message = '\n'.join(message)
1369
1369
1370 if opts['edit']:
1370 if opts['edit']:
1371 message = ui.edit(message, user or ui.username())
1371 message = ui.edit(message, user or ui.username())
1372
1372
1373 q.refresh(repo, msg=message)
1373 q.refresh(repo, msg=message)
1374
1374
1375 for patch in patches:
1375 for patch in patches:
1376 q.delete(repo, patch, force=opts['force'])
1376 q.delete(repo, patch, force=opts['force'])
1377
1377
1378 q.save_dirty()
1378 q.save_dirty()
1379
1379
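# After a fold, the refreshed patch header looks roughly like this sketch:
#
#   original patch header
#   * * *
#   header of first folded patch
#   * * *
#   header of second folded patch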
1380 def header(ui, repo, patch=None):
1380 def header(ui, repo, patch=None):
1381 """Print the header of the topmost or specified patch"""
1381 """Print the header of the topmost or specified patch"""
1382 q = repo.mq
1382 q = repo.mq
1383
1383
1384 if patch:
1384 if patch:
1385 patch = q.lookup(patch)
1385 patch = q.lookup(patch)
1386 else:
1386 else:
1387 if not q.applied:
1387 if not q.applied:
1388 ui.write('No patches applied\n')
1388 ui.write('No patches applied\n')
1389 return
1389 return
1390 patch = q.lookup('qtip')
1390 patch = q.lookup('qtip')
1391 message = repo.mq.readheaders(patch)[0]
1391 message = repo.mq.readheaders(patch)[0]
1392
1392
1393 ui.write('\n'.join(message) + '\n')
1393 ui.write('\n'.join(message) + '\n')
1394
1394
1395 def lastsavename(path):
1395 def lastsavename(path):
1396 (dir, base) = os.path.split(path)
1396 (dir, base) = os.path.split(path)
1397 names = os.listdir(dir)
1397 names = os.listdir(dir)
1398 namere = re.compile("%s.([0-9]+)" % base)
1398 namere = re.compile("%s.([0-9]+)" % base)
1399 max = None
1399 max = None
1400 maxname = None
1400 maxname = None
1401 for f in names:
1401 for f in names:
1402 m = namere.match(f)
1402 m = namere.match(f)
1403 if m:
1403 if m:
1404 index = int(m.group(1))
1404 index = int(m.group(1))
1405 if max == None or index > max:
1405 if max == None or index > max:
1406 max = index
1406 max = index
1407 maxname = f
1407 maxname = f
1408 if maxname:
1408 if maxname:
1409 return (os.path.join(dir, maxname), max)
1409 return (os.path.join(dir, maxname), max)
1410 return (None, None)
1410 return (None, None)
1411
1411
1412 def savename(path):
1412 def savename(path):
1413 (last, index) = lastsavename(path)
1413 (last, index) = lastsavename(path)
1414 if last is None:
1414 if last is None:
1415 index = 0
1415 index = 0
1416 newpath = path + ".%d" % (index + 1)
1416 newpath = path + ".%d" % (index + 1)
1417 return newpath
1417 return newpath
1418
1418
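# For example, with .hg/patches, .hg/patches.1 and .hg/patches.2 on disk
# (hypothetical layout, POSIX paths), lastsavename('.hg/patches') returns
# ('.hg/patches.2', 2) and savename('.hg/patches') returns '.hg/patches.3'.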
1419 def push(ui, repo, patch=None, **opts):
1419 def push(ui, repo, patch=None, **opts):
1420 """push the next patch onto the stack"""
1420 """push the next patch onto the stack"""
1421 q = repo.mq
1421 q = repo.mq
1422 mergeq = None
1422 mergeq = None
1423
1423
1424 if opts['all']:
1424 if opts['all']:
1425 patch = q.series[-1]
1425 patch = q.series[-1]
1426 if opts['merge']:
1426 if opts['merge']:
1427 if opts['name']:
1427 if opts['name']:
1428 newpath = opts['name']
1428 newpath = opts['name']
1429 else:
1429 else:
1430 newpath, i = lastsavename(q.path)
1430 newpath, i = lastsavename(q.path)
1431 if not newpath:
1431 if not newpath:
1432 ui.warn("no saved queues found, please use -n\n")
1432 ui.warn("no saved queues found, please use -n\n")
1433 return 1
1433 return 1
1434 mergeq = queue(ui, repo.join(""), newpath)
1434 mergeq = queue(ui, repo.join(""), newpath)
1435 ui.warn("merging with queue at: %s\n" % mergeq.path)
1435 ui.warn("merging with queue at: %s\n" % mergeq.path)
1436 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
1436 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
1437 mergeq=mergeq)
1437 mergeq=mergeq)
1438 q.save_dirty()
1438 q.save_dirty()
1439 return ret
1439 return ret
1440
1440
1441 def pop(ui, repo, patch=None, **opts):
1441 def pop(ui, repo, patch=None, **opts):
1442 """pop the current patch off the stack"""
1442 """pop the current patch off the stack"""
1443 localupdate = True
1443 localupdate = True
1444 if opts['name']:
1444 if opts['name']:
1445 q = queue(ui, repo.join(""), repo.join(opts['name']))
1445 q = queue(ui, repo.join(""), repo.join(opts['name']))
1446 ui.warn('using patch queue: %s\n' % q.path)
1446 ui.warn('using patch queue: %s\n' % q.path)
1447 localupdate = False
1447 localupdate = False
1448 else:
1448 else:
1449 q = repo.mq
1449 q = repo.mq
1450 q.pop(repo, patch, force=opts['force'], update=localupdate, all=opts['all'])
1450 q.pop(repo, patch, force=opts['force'], update=localupdate, all=opts['all'])
1451 q.save_dirty()
1451 q.save_dirty()
1452 return 0
1452 return 0
1453
1453
1454 def rename(ui, repo, patch, name=None, **opts):
1454 def rename(ui, repo, patch, name=None, **opts):
1455 """rename a patch
1455 """rename a patch
1456
1456
1457 With one argument, renames the current patch to PATCH1.
1457 With one argument, renames the current patch to PATCH1.
1458 With two arguments, renames PATCH1 to PATCH2."""
1458 With two arguments, renames PATCH1 to PATCH2."""
1459
1459
1460 q = repo.mq
1460 q = repo.mq
1461
1461
1462 if not name:
1462 if not name:
1463 name = patch
1463 name = patch
1464 patch = None
1464 patch = None
1465
1465
1466 if name in q.series:
1466 if name in q.series:
1467 raise util.Abort(_('A patch named %s already exists in the series file') % name)
1467 raise util.Abort(_('A patch named %s already exists in the series file') % name)
1468
1468
1469 absdest = os.path.join(q.path, name)
1469 absdest = os.path.join(q.path, name)
1470 if os.path.exists(absdest):
1470 if os.path.exists(absdest):
1471 raise util.Abort(_('%s already exists') % absdest)
1471 raise util.Abort(_('%s already exists') % absdest)
1472
1472
1473 if patch:
1473 if patch:
1474 patch = q.lookup(patch)
1474 patch = q.lookup(patch)
1475 else:
1475 else:
1476 if not q.applied:
1476 if not q.applied:
1477 ui.write(_('No patches applied\n'))
1477 ui.write(_('No patches applied\n'))
1478 return
1478 return
1479 patch = q.lookup('qtip')
1479 patch = q.lookup('qtip')
1480
1480
1481 if ui.verbose:
1481 if ui.verbose:
1482 ui.write('Renaming %s to %s\n' % (patch, name))
1482 ui.write('Renaming %s to %s\n' % (patch, name))
1483 i = q.find_series(patch)
1483 i = q.find_series(patch)
1484 q.full_series[i] = name
1484 q.full_series[i] = name
1485 q.parse_series()
1485 q.parse_series()
1486 q.series_dirty = 1
1486 q.series_dirty = 1
1487
1487
1488 info = q.isapplied(patch)
1488 info = q.isapplied(patch)
1489 if info:
1489 if info:
1490 q.applied[info[0]] = StatusEntry(info[1], name)
1490 q.applied[info[0]] = StatusEntry(info[1], name)
1491 q.applied_dirty = 1
1491 q.applied_dirty = 1
1492
1492
1493 util.rename(os.path.join(q.path, patch), absdest)
1493 util.rename(os.path.join(q.path, patch), absdest)
1494 r = q.qrepo()
1494 r = q.qrepo()
1495 if r:
1495 if r:
1496 wlock = r.wlock()
1496 wlock = r.wlock()
1497 if r.dirstate.state(name) == 'r':
1497 if r.dirstate.state(name) == 'r':
1498 r.undelete([name], wlock)
1498 r.undelete([name], wlock)
1499 r.copy(patch, name, wlock)
1499 r.copy(patch, name, wlock)
1500 r.remove([patch], False, wlock)
1500 r.remove([patch], False, wlock)
1501
1501
1502 q.save_dirty()
1502 q.save_dirty()
1503
1503
1504 def restore(ui, repo, rev, **opts):
1504 def restore(ui, repo, rev, **opts):
1505 """restore the queue state saved by a rev"""
1505 """restore the queue state saved by a rev"""
1506 rev = repo.lookup(rev)
1506 rev = repo.lookup(rev)
1507 q = repo.mq
1507 q = repo.mq
1508 q.restore(repo, rev, delete=opts['delete'],
1508 q.restore(repo, rev, delete=opts['delete'],
1509 qupdate=opts['update'])
1509 qupdate=opts['update'])
1510 q.save_dirty()
1510 q.save_dirty()
1511 return 0
1511 return 0
1512
1512
1513 def save(ui, repo, **opts):
1513 def save(ui, repo, **opts):
1514 """save current queue state"""
1514 """save current queue state"""
1515 q = repo.mq
1515 q = repo.mq
1516 message=commands.logmessage(**opts)
1516 message=commands.logmessage(**opts)
1517 ret = q.save(repo, msg=message)
1517 ret = q.save(repo, msg=message)
1518 if ret:
1518 if ret:
1519 return ret
1519 return ret
1520 q.save_dirty()
1520 q.save_dirty()
1521 if opts['copy']:
1521 if opts['copy']:
1522 path = q.path
1522 path = q.path
1523 if opts['name']:
1523 if opts['name']:
1524 newpath = os.path.join(q.basepath, opts['name'])
1524 newpath = os.path.join(q.basepath, opts['name'])
1525 if os.path.exists(newpath):
1525 if os.path.exists(newpath):
1526 if not os.path.isdir(newpath):
1526 if not os.path.isdir(newpath):
1527 raise util.Abort(_('destination %s exists and is not '
1527 raise util.Abort(_('destination %s exists and is not '
1528 'a directory') % newpath)
1528 'a directory') % newpath)
1529 if not opts['force']:
1529 if not opts['force']:
1530 raise util.Abort(_('destination %s exists, '
1530 raise util.Abort(_('destination %s exists, '
1531 'use -f to force') % newpath)
1531 'use -f to force') % newpath)
1532 else:
1532 else:
1533 newpath = savename(path)
1533 newpath = savename(path)
1534 ui.warn("copy %s to %s\n" % (path, newpath))
1534 ui.warn("copy %s to %s\n" % (path, newpath))
1535 util.copyfiles(path, newpath)
1535 util.copyfiles(path, newpath)
1536 if opts['empty']:
1536 if opts['empty']:
1537 try:
1537 try:
1538 os.unlink(os.path.join(q.path, q.status_path))
1538 os.unlink(os.path.join(q.path, q.status_path))
1539 except:
1539 except:
1540 pass
1540 pass
1541 return 0
1541 return 0
1542
1542
1543 def strip(ui, repo, rev, **opts):
1543 def strip(ui, repo, rev, **opts):
1544 """strip a revision and all later revs on the same branch"""
1544 """strip a revision and all later revs on the same branch"""
1545 rev = repo.lookup(rev)
1545 rev = repo.lookup(rev)
1546 backup = 'all'
1546 backup = 'all'
1547 if opts['backup']:
1547 if opts['backup']:
1548 backup = 'strip'
1548 backup = 'strip'
1549 elif opts['nobackup']:
1549 elif opts['nobackup']:
1550 backup = 'none'
1550 backup = 'none'
1551 repo.mq.strip(repo, rev, backup=backup)
1551 repo.mq.strip(repo, rev, backup=backup)
1552 return 0
1552 return 0
1553
1553
1554 def version(ui, q=None):
1554 def version(ui, q=None):
1555 """print the version number of the mq extension"""
1555 """print the version number of the mq extension"""
1556 ui.write("mq version %s\n" % versionstr)
1556 ui.write("mq version %s\n" % versionstr)
1557 return 0
1557 return 0
1558
1558
1559 def reposetup(ui, repo):
1559 def reposetup(ui, repo):
1560 class MqRepo(repo.__class__):
1560 class MqRepo(repo.__class__):
1561 def tags(self):
1561 def tags(self):
1562 if self.tagscache:
1562 if self.tagscache:
1563 return self.tagscache
1563 return self.tagscache
1564
1564
1565 tagscache = super(MqRepo, self).tags()
1565 tagscache = super(MqRepo, self).tags()
1566
1566
1567 q = self.mq
1567 q = self.mq
1568 if not q.applied:
1568 if not q.applied:
1569 return tagscache
1569 return tagscache
1570
1570
1571 mqtags = [(patch.rev, patch.name) for patch in q.applied]
1571 mqtags = [(patch.rev, patch.name) for patch in q.applied]
1572 mqtags.append((mqtags[-1][0], 'qtip'))
1572 mqtags.append((mqtags[-1][0], 'qtip'))
1573 mqtags.append((mqtags[0][0], 'qbase'))
1573 mqtags.append((mqtags[0][0], 'qbase'))
1574 for patch in mqtags:
1574 for patch in mqtags:
1575 if patch[1] in tagscache:
1575 if patch[1] in tagscache:
1576 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
1576 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
1577 else:
1577 else:
1578 tagscache[patch[1]] = revlog.bin(patch[0])
1578 tagscache[patch[1]] = revlog.bin(patch[0])
1579
1579
1580 return tagscache
1580 return tagscache
1581
1581
1582 repo.__class__ = MqRepo
1582 repo.__class__ = MqRepo
1583 repo.mq = queue(ui, repo.join(""))
1583 repo.mq = queue(ui, repo.join(""))
1584
1584
1585 cmdtable = {
1585 cmdtable = {
1586 "qapplied": (applied, [], 'hg qapplied [PATCH]'),
1586 "qapplied": (applied, [], 'hg qapplied [PATCH]'),
1587 "qclone": (clone,
1587 "qclone": (clone,
1588 [('', 'pull', None, _('use pull protocol to copy metadata')),
1588 [('', 'pull', None, _('use pull protocol to copy metadata')),
1589 ('U', 'noupdate', None, _('do not update the new working directories')),
1589 ('U', 'noupdate', None, _('do not update the new working directories')),
1590 ('', 'uncompressed', None,
1590 ('', 'uncompressed', None,
1591 _('use uncompressed transfer (fast over LAN)')),
1591 _('use uncompressed transfer (fast over LAN)')),
1592 ('e', 'ssh', '', _('specify ssh command to use')),
1592 ('e', 'ssh', '', _('specify ssh command to use')),
1593 ('p', 'patches', '', _('location of source patch repo')),
1593 ('p', 'patches', '', _('location of source patch repo')),
1594 ('', 'remotecmd', '',
1594 ('', 'remotecmd', '',
1595 _('specify hg command to run on the remote side'))],
1595 _('specify hg command to run on the remote side'))],
1596 'hg qclone [OPTION]... SOURCE [DEST]'),
1596 'hg qclone [OPTION]... SOURCE [DEST]'),
1597 "qcommit|qci":
1597 "qcommit|qci":
1598 (commit,
1598 (commit,
1599 commands.table["^commit|ci"][1],
1599 commands.table["^commit|ci"][1],
1600 'hg qcommit [OPTION]... [FILE]...'),
1600 'hg qcommit [OPTION]... [FILE]...'),
1601 "^qdiff": (diff, [], 'hg qdiff [FILE]...'),
1601 "^qdiff": (diff, [], 'hg qdiff [FILE]...'),
1602 "qdelete":
1602 "qdelete":
1603 (delete,
1603 (delete,
1604 [('f', 'force', None, _('delete patch file'))],
1604 [('f', 'force', None, _('delete patch file'))],
1605 'hg qdelete [-f] PATCH'),
1605 'hg qdelete [-f] PATCH'),
1606 'qfold':
1606 'qfold':
1607 (fold,
1607 (fold,
1608 [('e', 'edit', None, _('edit patch header')),
1608 [('e', 'edit', None, _('edit patch header')),
1609 ('f', 'force', None, _('delete folded patch files')),
1609 ('f', 'force', None, _('delete folded patch files')),
1610 ('m', 'message', '', _('set patch header to <text>')),
1610 ('m', 'message', '', _('set patch header to <text>')),
1611 ('l', 'logfile', '', _('set patch header to contents of <file>'))],
1611 ('l', 'logfile', '', _('set patch header to contents of <file>'))],
1612 'hg qfold [-e] [-m <text>] [-l <file>] PATCH...'),
1612 'hg qfold [-e] [-m <text>] [-l <file>] PATCH...'),
1613 'qheader': (header, [],
1613 'qheader': (header, [],
1614 _('hg qheader [PATCH]')),
1614 _('hg qheader [PATCH]')),
1615 "^qimport":
1615 "^qimport":
1616 (qimport,
1616 (qimport,
1617 [('e', 'existing', None, 'import file in patch dir'),
1617 [('e', 'existing', None, 'import file in patch dir'),
1618 ('n', 'name', '', 'patch file name'),
1618 ('n', 'name', '', 'patch file name'),
1619 ('f', 'force', None, 'overwrite existing files')],
1619 ('f', 'force', None, 'overwrite existing files')],
1620 'hg qimport [-e] [-n NAME] [-f] FILE...'),
1620 'hg qimport [-e] [-n NAME] [-f] FILE...'),
1621 "^qinit":
1621 "^qinit":
1622 (init,
1622 (init,
1623 [('c', 'create-repo', None, 'create queue repository')],
1623 [('c', 'create-repo', None, 'create queue repository')],
1624 'hg qinit [-c]'),
1624 'hg qinit [-c]'),
1625 "qnew":
1625 "qnew":
1626 (new,
1626 (new,
1627 [('m', 'message', '', _('use <text> as commit message')),
1627 [('m', 'message', '', _('use <text> as commit message')),
1628 ('l', 'logfile', '', _('read the commit message from <file>')),
1628 ('l', 'logfile', '', _('read the commit message from <file>')),
1629 ('f', 'force', None, _('import uncommitted changes into patch'))],
1629 ('f', 'force', None, _('import uncommitted changes into patch'))],
1630 'hg qnew [-m TEXT] [-l FILE] [-f] PATCH'),
1630 'hg qnew [-m TEXT] [-l FILE] [-f] PATCH'),
1631 "qnext": (next, [], 'hg qnext'),
1631 "qnext": (next, [], 'hg qnext'),
1632 "qprev": (prev, [], 'hg qprev'),
1632 "qprev": (prev, [], 'hg qprev'),
1633 "^qpop":
1633 "^qpop":
1634 (pop,
1634 (pop,
1635 [('a', 'all', None, 'pop all patches'),
1635 [('a', 'all', None, 'pop all patches'),
1636 ('n', 'name', '', 'queue name to pop'),
1636 ('n', 'name', '', 'queue name to pop'),
1637 ('f', 'force', None, 'forget any local changes')],
1637 ('f', 'force', None, 'forget any local changes')],
1638 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
1638 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
1639 "^qpush":
1639 "^qpush":
1640 (push,
1640 (push,
1641 [('f', 'force', None, 'apply if the patch has rejects'),
1641 [('f', 'force', None, 'apply if the patch has rejects'),
1642 ('l', 'list', None, 'list patch name in commit text'),
1642 ('l', 'list', None, 'list patch name in commit text'),
1643 ('a', 'all', None, 'apply all patches'),
1643 ('a', 'all', None, 'apply all patches'),
1644 ('m', 'merge', None, 'merge from another queue'),
1644 ('m', 'merge', None, 'merge from another queue'),
1645 ('n', 'name', '', 'merge queue name')],
1645 ('n', 'name', '', 'merge queue name')],
1646 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
1646 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
1647 "^qrefresh":
1647 "^qrefresh":
1648 (refresh,
1648 (refresh,
1649 [('e', 'edit', None, _('edit commit message')),
1649 [('e', 'edit', None, _('edit commit message')),
1650 ('m', 'message', '', _('change commit message with <text>')),
1650 ('m', 'message', '', _('change commit message with <text>')),
1651 ('l', 'logfile', '', _('change commit message with <file> content')),
1651 ('l', 'logfile', '', _('change commit message with <file> content')),
1652 ('s', 'short', None, 'short refresh')],
1652 ('s', 'short', None, 'short refresh')],
1653 'hg qrefresh [-e] [-m TEXT] [-l FILE] [-s]'),
1653 'hg qrefresh [-e] [-m TEXT] [-l FILE] [-s]'),
1654 'qrename|qmv':
1654 'qrename|qmv':
1655 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
1655 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
1656 "qrestore":
1656 "qrestore":
1657 (restore,
1657 (restore,
1658 [('d', 'delete', None, 'delete save entry'),
1658 [('d', 'delete', None, 'delete save entry'),
1659 ('u', 'update', None, 'update queue working dir')],
1659 ('u', 'update', None, 'update queue working dir')],
1660 'hg qrestore [-d] [-u] REV'),
1660 'hg qrestore [-d] [-u] REV'),
1661 "qsave":
1661 "qsave":
1662 (save,
1662 (save,
1663 [('m', 'message', '', _('use <text> as commit message')),
1663 [('m', 'message', '', _('use <text> as commit message')),
1664 ('l', 'logfile', '', _('read the commit message from <file>')),
1664 ('l', 'logfile', '', _('read the commit message from <file>')),
1665 ('c', 'copy', None, 'copy patch directory'),
1665 ('c', 'copy', None, 'copy patch directory'),
1666 ('n', 'name', '', 'copy directory name'),
1666 ('n', 'name', '', 'copy directory name'),
1667 ('e', 'empty', None, 'clear queue status file'),
1667 ('e', 'empty', None, 'clear queue status file'),
1668 ('f', 'force', None, 'force copy')],
1668 ('f', 'force', None, 'force copy')],
1669 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
1669 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
1670 "qseries":
1670 "qseries":
1671 (series,
1671 (series,
1672 [('m', 'missing', None, 'print patches not in series'),
1672 [('m', 'missing', None, 'print patches not in series'),
1673 ('s', 'summary', None, _('print first line of patch header'))],
1673 ('s', 'summary', None, _('print first line of patch header'))],
1674 'hg qseries [-m] [-s]'),
1674 'hg qseries [-m] [-s]'),
1675 "^strip":
1675 "^strip":
1676 (strip,
1676 (strip,
1677 [('f', 'force', None, 'force multi-head removal'),
1677 [('f', 'force', None, 'force multi-head removal'),
1678 ('b', 'backup', None, 'bundle unrelated changesets'),
1678 ('b', 'backup', None, 'bundle unrelated changesets'),
1679 ('n', 'nobackup', None, 'no backups')],
1679 ('n', 'nobackup', None, 'no backups')],
1680 'hg strip [-f] [-b] [-n] REV'),
1680 'hg strip [-f] [-b] [-n] REV'),
1681 "qtop": (top, [], 'hg qtop'),
1681 "qtop": (top, [], 'hg qtop'),
1682 "qunapplied": (unapplied, [], 'hg qunapplied [PATCH]'),
1682 "qunapplied": (unapplied, [], 'hg qunapplied [PATCH]'),
1683 "qversion": (version, [], 'hg qversion')
1683 "qversion": (version, [], 'hg qversion')
1684 }
1684 }
1685
1685
@@ -1,1010 +1,1016 b''
1 """
1 """
2 util.py - Mercurial utility functions and platform specific implementations
2 util.py - Mercurial utility functions and platform specific implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8
8
9 This contains helper routines that are independent of the SCM core and hide
9 This contains helper routines that are independent of the SCM core and hide
10 platform-specific details from the core.
10 platform-specific details from the core.
11 """
11 """
12
12
13 from i18n import gettext as _
13 from i18n import gettext as _
14 from demandload import *
14 from demandload import *
15 demandload(globals(), "cStringIO errno getpass popen2 re shutil sys tempfile")
15 demandload(globals(), "cStringIO errno getpass popen2 re shutil sys tempfile")
16 demandload(globals(), "os threading time")
16 demandload(globals(), "os threading time")
17
17
18 # used by parsedate
18 # used by parsedate
19 defaultdateformats = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',
19 defaultdateformats = ('%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M',
20 '%a %b %d %H:%M:%S %Y')
20 '%a %b %d %H:%M:%S %Y')
21
21
22 class SignalInterrupt(Exception):
22 class SignalInterrupt(Exception):
23 """Exception raised on SIGTERM and SIGHUP."""
23 """Exception raised on SIGTERM and SIGHUP."""
24
24
25 def pipefilter(s, cmd):
25 def pipefilter(s, cmd):
26 '''filter string S through command CMD, returning its output'''
26 '''filter string S through command CMD, returning its output'''
27 (pout, pin) = popen2.popen2(cmd, -1, 'b')
27 (pout, pin) = popen2.popen2(cmd, -1, 'b')
28 def writer():
28 def writer():
29 try:
29 try:
30 pin.write(s)
30 pin.write(s)
31 pin.close()
31 pin.close()
32 except IOError, inst:
32 except IOError, inst:
33 if inst.errno != errno.EPIPE:
33 if inst.errno != errno.EPIPE:
34 raise
34 raise
35
35
36 # we should use select instead on UNIX, but this will work on most
36 # we should use select instead on UNIX, but this will work on most
37 # systems, including Windows
37 # systems, including Windows
38 w = threading.Thread(target=writer)
38 w = threading.Thread(target=writer)
39 w.start()
39 w.start()
40 f = pout.read()
40 f = pout.read()
41 pout.close()
41 pout.close()
42 w.join()
42 w.join()
43 return f
43 return f
44
44
45 def tempfilter(s, cmd):
45 def tempfilter(s, cmd):
46 '''filter string S through a pair of temporary files with CMD.
46 '''filter string S through a pair of temporary files with CMD.
47 CMD is used as a template to create the real command to be run,
47 CMD is used as a template to create the real command to be run,
48 with the strings INFILE and OUTFILE replaced by the real names of
48 with the strings INFILE and OUTFILE replaced by the real names of
49 the temporary files generated.'''
49 the temporary files generated.'''
50 inname, outname = None, None
50 inname, outname = None, None
51 try:
51 try:
52 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
52 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
53 fp = os.fdopen(infd, 'wb')
53 fp = os.fdopen(infd, 'wb')
54 fp.write(s)
54 fp.write(s)
55 fp.close()
55 fp.close()
56 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
56 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
57 os.close(outfd)
57 os.close(outfd)
58 cmd = cmd.replace('INFILE', inname)
58 cmd = cmd.replace('INFILE', inname)
59 cmd = cmd.replace('OUTFILE', outname)
59 cmd = cmd.replace('OUTFILE', outname)
60 code = os.system(cmd)
60 code = os.system(cmd)
61 if code: raise Abort(_("command '%s' failed: %s") %
61 if code: raise Abort(_("command '%s' failed: %s") %
62 (cmd, explain_exit(code)[0]))
62 (cmd, explain_exit(code)[0]))
63 return open(outname, 'rb').read()
63 return open(outname, 'rb').read()
64 finally:
64 finally:
65 try:
65 try:
66 if inname: os.unlink(inname)
66 if inname: os.unlink(inname)
67 except: pass
67 except: pass
68 try:
68 try:
69 if outname: os.unlink(outname)
69 if outname: os.unlink(outname)
70 except: pass
70 except: pass
71
71
72 filtertable = {
72 filtertable = {
73 'tempfile:': tempfilter,
73 'tempfile:': tempfilter,
74 'pipe:': pipefilter,
74 'pipe:': pipefilter,
75 }
75 }
76
76
77 def filter(s, cmd):
77 def filter(s, cmd):
78 "filter a string through a command that transforms its input to its output"
78 "filter a string through a command that transforms its input to its output"
79 for name, fn in filtertable.iteritems():
79 for name, fn in filtertable.iteritems():
80 if cmd.startswith(name):
80 if cmd.startswith(name):
81 return fn(s, cmd[len(name):].lstrip())
81 return fn(s, cmd[len(name):].lstrip())
82 return pipefilter(s, cmd)
82 return pipefilter(s, cmd)
83
83
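# A small usage sketch (assumes a POSIX 'tr' in PATH): both specs uppercase
# the input, and anything without a known prefix falls through to pipefilter().
#   filter('mercurial\n', 'pipe: tr a-z A-Z')                        -> 'MERCURIAL\n'
#   filter('mercurial\n', 'tempfile: tr a-z A-Z < INFILE > OUTFILE') -> 'MERCURIAL\n'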
84 def find_in_path(name, path, default=None):
84 def find_in_path(name, path, default=None):
85 '''find name in search path. path can be a string (will be split
85 '''find name in search path. path can be a string (will be split
86 with os.pathsep) or an iterable of strings. if name
86 with os.pathsep) or an iterable of strings. if name
87 found, return path to name. else return default.'''
87 found, return path to name. else return default.'''
88 if isinstance(path, str):
88 if isinstance(path, str):
89 path = path.split(os.pathsep)
89 path = path.split(os.pathsep)
90 for p in path:
90 for p in path:
91 p_name = os.path.join(p, name)
91 p_name = os.path.join(p, name)
92 if os.path.exists(p_name):
92 if os.path.exists(p_name):
93 return p_name
93 return p_name
94 return default
94 return default
95
95
96 def patch(strip, patchname, ui, cwd=None):
96 def patch(strip, patchname, ui, cwd=None):
97 """apply the patch <patchname> to the working directory.
97 """apply the patch <patchname> to the working directory.
98 a list of patched files is returned"""
98 a list of patched files is returned"""
99 patcher = find_in_path('gpatch', os.environ.get('PATH', ''), 'patch')
99 patcher = find_in_path('gpatch', os.environ.get('PATH', ''), 'patch')
100 args = []
100 args = []
101 if cwd:
101 if cwd:
102 args.append('-d "%s"' % cwd)
102 args.append('-d "%s"' % cwd)
103 fp = os.popen('%s %s -p%d < "%s"' % (patcher, ' '.join(args), strip,
103 fp = os.popen('%s %s -p%d < "%s"' % (patcher, ' '.join(args), strip,
104 patchname))
104 patchname))
105 files = {}
105 files = {}
106 for line in fp:
106 for line in fp:
107 line = line.rstrip()
107 line = line.rstrip()
108 ui.status("%s\n" % line)
108 ui.status("%s\n" % line)
109 if line.startswith('patching file '):
109 if line.startswith('patching file '):
110 pf = parse_patch_output(line)
110 pf = parse_patch_output(line)
111 files.setdefault(pf, 1)
111 files.setdefault(pf, 1)
112 code = fp.close()
112 code = fp.close()
113 if code:
113 if code:
114 raise Abort(_("patch command failed: %s") % explain_exit(code)[0])
114 raise Abort(_("patch command failed: %s") % explain_exit(code)[0])
115 return files.keys()
115 return files.keys()
116
116
117 def binary(s):
117 def binary(s):
118 """return true if a string is binary data using diff's heuristic"""
118 """return true if a string is binary data using diff's heuristic"""
119 if s and '\0' in s[:4096]:
119 if s and '\0' in s[:4096]:
120 return True
120 return True
121 return False
121 return False
122
122
123 def unique(g):
123 def unique(g):
124 """return the uniq elements of iterable g"""
124 """return the uniq elements of iterable g"""
125 seen = {}
125 seen = {}
126 for f in g:
126 for f in g:
127 if f not in seen:
127 if f not in seen:
128 seen[f] = 1
128 seen[f] = 1
129 yield f
129 yield f
130
130
131 class Abort(Exception):
131 class Abort(Exception):
132 """Raised if a command needs to print an error and exit."""
132 """Raised if a command needs to print an error and exit."""
133
133
134 def always(fn): return True
134 def always(fn): return True
135 def never(fn): return False
135 def never(fn): return False
136
136
137 def patkind(name, dflt_pat='glob'):
137 def patkind(name, dflt_pat='glob'):
138 """Split a string into an optional pattern kind prefix and the
138 """Split a string into an optional pattern kind prefix and the
139 actual pattern."""
139 actual pattern."""
140 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
140 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
141 if name.startswith(prefix + ':'): return name.split(':', 1)
141 if name.startswith(prefix + ':'): return name.split(':', 1)
142 return dflt_pat, name
142 return dflt_pat, name
143
143
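# Examples of the split above (each call yields a kind/pattern pair):
#   patkind('re:\.py$')       -> kind 're',      pattern '\.py$'
#   patkind('*.c')            -> kind 'glob',    pattern '*.c'
#   patkind('*.c', 'relpath') -> kind 'relpath', pattern '*.c'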
144 def globre(pat, head='^', tail='$'):
144 def globre(pat, head='^', tail='$'):
145 "convert a glob pattern into a regexp"
145 "convert a glob pattern into a regexp"
146 i, n = 0, len(pat)
146 i, n = 0, len(pat)
147 res = ''
147 res = ''
148 group = False
148 group = False
149 def peek(): return i < n and pat[i]
149 def peek(): return i < n and pat[i]
150 while i < n:
150 while i < n:
151 c = pat[i]
151 c = pat[i]
152 i = i+1
152 i = i+1
153 if c == '*':
153 if c == '*':
154 if peek() == '*':
154 if peek() == '*':
155 i += 1
155 i += 1
156 res += '.*'
156 res += '.*'
157 else:
157 else:
158 res += '[^/]*'
158 res += '[^/]*'
159 elif c == '?':
159 elif c == '?':
160 res += '.'
160 res += '.'
161 elif c == '[':
161 elif c == '[':
162 j = i
162 j = i
163 if j < n and pat[j] in '!]':
163 if j < n and pat[j] in '!]':
164 j += 1
164 j += 1
165 while j < n and pat[j] != ']':
165 while j < n and pat[j] != ']':
166 j += 1
166 j += 1
167 if j >= n:
167 if j >= n:
168 res += '\\['
168 res += '\\['
169 else:
169 else:
170 stuff = pat[i:j].replace('\\','\\\\')
170 stuff = pat[i:j].replace('\\','\\\\')
171 i = j + 1
171 i = j + 1
172 if stuff[0] == '!':
172 if stuff[0] == '!':
173 stuff = '^' + stuff[1:]
173 stuff = '^' + stuff[1:]
174 elif stuff[0] == '^':
174 elif stuff[0] == '^':
175 stuff = '\\' + stuff
175 stuff = '\\' + stuff
176 res = '%s[%s]' % (res, stuff)
176 res = '%s[%s]' % (res, stuff)
177 elif c == '{':
177 elif c == '{':
178 group = True
178 group = True
179 res += '(?:'
179 res += '(?:'
180 elif c == '}' and group:
180 elif c == '}' and group:
181 res += ')'
181 res += ')'
182 group = False
182 group = False
183 elif c == ',' and group:
183 elif c == ',' and group:
184 res += '|'
184 res += '|'
185 elif c == '\\':
185 elif c == '\\':
186 p = peek()
186 p = peek()
187 if p:
187 if p:
188 i += 1
188 i += 1
189 res += re.escape(p)
189 res += re.escape(p)
190 else:
190 else:
191 res += re.escape(c)
191 res += re.escape(c)
192 else:
192 else:
193 res += re.escape(c)
193 res += re.escape(c)
194 return head + res + tail
194 return head + res + tail
195
195
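# For instance, globre('src/**/*.py') yields a regexp equivalent to
# '^src/.*/[^/]*\.py$' (literal characters go through re.escape()):
# '**' crosses directory separators while a single '*' does not, so
# 'src/mercurial/util.py' matches but 'src/setup.py' and 'tests/foo.py' do not.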
196 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
196 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
197
197
198 def pathto(n1, n2):
198 def pathto(n1, n2):
199 '''return the relative path from one place to another.
199 '''return the relative path from one place to another.
200 this returns a path in the form used by the local filesystem, not hg.'''
200 this returns a path in the form used by the local filesystem, not hg.'''
201 if not n1: return localpath(n2)
201 if not n1: return localpath(n2)
202 a, b = n1.split('/'), n2.split('/')
202 a, b = n1.split('/'), n2.split('/')
203 a.reverse()
203 a.reverse()
204 b.reverse()
204 b.reverse()
205 while a and b and a[-1] == b[-1]:
205 while a and b and a[-1] == b[-1]:
206 a.pop()
206 a.pop()
207 b.pop()
207 b.pop()
208 b.reverse()
208 b.reverse()
209 return os.sep.join((['..'] * len(a)) + b)
209 return os.sep.join((['..'] * len(a)) + b)
210
210
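# For instance, pathto('foo/bar', 'foo/baz/quux') == '../baz/quux' with a
# POSIX os.sep: the shared leading component 'foo' is dropped, one '..' is
# emitted for the remaining component of n1, and the rest of n2 is appended.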
211 def canonpath(root, cwd, myname):
211 def canonpath(root, cwd, myname):
212 """return the canonical path of myname, given cwd and root"""
212 """return the canonical path of myname, given cwd and root"""
213 if root == os.sep:
213 if root == os.sep:
214 rootsep = os.sep
214 rootsep = os.sep
215 elif root.endswith(os.sep):
215 elif root.endswith(os.sep):
216 rootsep = root
216 rootsep = root
217 else:
217 else:
218 rootsep = root + os.sep
218 rootsep = root + os.sep
219 name = myname
219 name = myname
220 if not os.path.isabs(name):
220 if not os.path.isabs(name):
221 name = os.path.join(root, cwd, name)
221 name = os.path.join(root, cwd, name)
222 name = os.path.normpath(name)
222 name = os.path.normpath(name)
223 if name != rootsep and name.startswith(rootsep):
223 if name != rootsep and name.startswith(rootsep):
224 name = name[len(rootsep):]
224 name = name[len(rootsep):]
225 audit_path(name)
225 audit_path(name)
226 return pconvert(name)
226 return pconvert(name)
227 elif name == root:
227 elif name == root:
228 return ''
228 return ''
229 else:
229 else:
230 # Determine whether `name' is in the hierarchy at or beneath `root',
230 # Determine whether `name' is in the hierarchy at or beneath `root',
231 # by iterating name=dirname(name) until that causes no change (can't
231 # by iterating name=dirname(name) until that causes no change (can't
232 # check name == '/', because that doesn't work on windows). For each
232 # check name == '/', because that doesn't work on windows). For each
233 # `name', compare dev/inode numbers. If they match, the list `rel'
233 # `name', compare dev/inode numbers. If they match, the list `rel'
234 # holds the reversed list of components making up the relative file
234 # holds the reversed list of components making up the relative file
235 # name we want.
235 # name we want.
236 root_st = os.stat(root)
236 root_st = os.stat(root)
237 rel = []
237 rel = []
238 while True:
238 while True:
239 try:
239 try:
240 name_st = os.stat(name)
240 name_st = os.stat(name)
241 except OSError:
241 except OSError:
242 break
242 break
243 if samestat(name_st, root_st):
243 if samestat(name_st, root_st):
244 rel.reverse()
244 rel.reverse()
245 name = os.path.join(*rel)
245 name = os.path.join(*rel)
246 audit_path(name)
246 audit_path(name)
247 return pconvert(name)
247 return pconvert(name)
248 dirname, basename = os.path.split(name)
248 dirname, basename = os.path.split(name)
249 rel.append(basename)
249 rel.append(basename)
250 if dirname == name:
250 if dirname == name:
251 break
251 break
252 name = dirname
252 name = dirname
253
253
254 raise Abort('%s not under root' % myname)
254 raise Abort('%s not under root' % myname)
255
255
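A hedged sketch of canonpath in the common case, where the name stays under the root and the stat fallback is never reached; '/repo' is a made-up root and the import path is assumed.

from mercurial.util import canonpath   # assumed import path

root = '/repo'                              # hypothetical repository root
print(canonpath(root, 'src', 'main.c'))     # -> 'src/main.c'
print(canonpath(root, '', root + '/doc'))   # absolute names are accepted -> 'doc'
# names that resolve outside the root (e.g. '../etc/passwd') raise Abort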
256 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
256 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
257 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
257 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
258
258
259 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
259 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
260 if os.name == 'nt':
260 if os.name == 'nt':
261 dflt_pat = 'glob'
261 dflt_pat = 'glob'
262 else:
262 else:
263 dflt_pat = 'relpath'
263 dflt_pat = 'relpath'
264 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
264 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
265
265
266 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
266 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
267 """build a function to match a set of file patterns
267 """build a function to match a set of file patterns
268
268
269 arguments:
269 arguments:
270 canonroot - the canonical root of the tree you're matching against
270 canonroot - the canonical root of the tree you're matching against
271 cwd - the current working directory, if relevant
271 cwd - the current working directory, if relevant
272 names - patterns to find
272 names - patterns to find
273 inc - patterns to include
273 inc - patterns to include
274 exc - patterns to exclude
274 exc - patterns to exclude
275 head - a regex to prepend to patterns to control whether a match is rooted
275 head - a regex to prepend to patterns to control whether a match is rooted
276
276
277 a pattern is one of:
277 a pattern is one of:
278 'glob:<rooted glob>'
278 'glob:<rooted glob>'
279 're:<rooted regexp>'
279 're:<rooted regexp>'
280 'path:<rooted path>'
280 'path:<rooted path>'
281 'relglob:<relative glob>'
281 'relglob:<relative glob>'
282 'relpath:<relative path>'
282 'relpath:<relative path>'
283 'relre:<relative regexp>'
283 'relre:<relative regexp>'
284 '<rooted path or regexp>'
284 '<rooted path or regexp>'
285
285
286 returns:
286 returns:
287 a 3-tuple containing
287 a 3-tuple containing
288 - list of explicit non-pattern names passed in
288 - list of explicit non-pattern names passed in
289 - a bool match(filename) function
289 - a bool match(filename) function
290 - a bool indicating if any patterns were passed in
290 - a bool indicating if any patterns were passed in
291
291
292 todo:
292 todo:
293 make head regex a rooted bool
293 make head regex a rooted bool
294 """
294 """
295
295
296 def contains_glob(name):
296 def contains_glob(name):
297 for c in name:
297 for c in name:
298 if c in _globchars: return True
298 if c in _globchars: return True
299 return False
299 return False
300
300
301 def regex(kind, name, tail):
301 def regex(kind, name, tail):
302 '''convert a pattern into a regular expression'''
302 '''convert a pattern into a regular expression'''
303 if kind == 're':
303 if kind == 're':
304 return name
304 return name
305 elif kind == 'path':
305 elif kind == 'path':
306 return '^' + re.escape(name) + '(?:/|$)'
306 return '^' + re.escape(name) + '(?:/|$)'
307 elif kind == 'relglob':
307 elif kind == 'relglob':
308 return head + globre(name, '(?:|.*/)', tail)
308 return head + globre(name, '(?:|.*/)', tail)
309 elif kind == 'relpath':
309 elif kind == 'relpath':
310 return head + re.escape(name) + tail
310 return head + re.escape(name) + tail
311 elif kind == 'relre':
311 elif kind == 'relre':
312 if name.startswith('^'):
312 if name.startswith('^'):
313 return name
313 return name
314 return '.*' + name
314 return '.*' + name
315 return head + globre(name, '', tail)
315 return head + globre(name, '', tail)
316
316
317 def matchfn(pats, tail):
317 def matchfn(pats, tail):
318 """build a matching function from a set of patterns"""
318 """build a matching function from a set of patterns"""
319 if not pats:
319 if not pats:
320 return
320 return
321 matches = []
321 matches = []
322 for k, p in pats:
322 for k, p in pats:
323 try:
323 try:
324 pat = '(?:%s)' % regex(k, p, tail)
324 pat = '(?:%s)' % regex(k, p, tail)
325 matches.append(re.compile(pat).match)
325 matches.append(re.compile(pat).match)
326 except re.error:
326 except re.error:
327 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
327 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
328 else: raise Abort("invalid pattern (%s): %s" % (k, p))
328 else: raise Abort("invalid pattern (%s): %s" % (k, p))
329
329
330 def buildfn(text):
330 def buildfn(text):
331 for m in matches:
331 for m in matches:
332 r = m(text)
332 r = m(text)
333 if r:
333 if r:
334 return r
334 return r
335
335
336 return buildfn
336 return buildfn
337
337
338 def globprefix(pat):
338 def globprefix(pat):
339 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
339 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
340 root = []
340 root = []
341 for p in pat.split(os.sep):
341 for p in pat.split(os.sep):
342 if contains_glob(p): break
342 if contains_glob(p): break
343 root.append(p)
343 root.append(p)
344 return '/'.join(root)
344 return '/'.join(root)
345
345
346 pats = []
346 pats = []
347 files = []
347 files = []
348 roots = []
348 roots = []
349 for kind, name in [patkind(p, dflt_pat) for p in names]:
349 for kind, name in [patkind(p, dflt_pat) for p in names]:
350 if kind in ('glob', 'relpath'):
350 if kind in ('glob', 'relpath'):
351 name = canonpath(canonroot, cwd, name)
351 name = canonpath(canonroot, cwd, name)
352 if name == '':
352 if name == '':
353 kind, name = 'glob', '**'
353 kind, name = 'glob', '**'
354 if kind in ('glob', 'path', 're'):
354 if kind in ('glob', 'path', 're'):
355 pats.append((kind, name))
355 pats.append((kind, name))
356 if kind == 'glob':
356 if kind == 'glob':
357 root = globprefix(name)
357 root = globprefix(name)
358 if root: roots.append(root)
358 if root: roots.append(root)
359 elif kind == 'relpath':
359 elif kind == 'relpath':
360 files.append((kind, name))
360 files.append((kind, name))
361 roots.append(name)
361 roots.append(name)
362
362
363 patmatch = matchfn(pats, '$') or always
363 patmatch = matchfn(pats, '$') or always
364 filematch = matchfn(files, '(?:/|$)') or always
364 filematch = matchfn(files, '(?:/|$)') or always
365 incmatch = always
365 incmatch = always
366 if inc:
366 if inc:
367 inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
367 inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
368 incmatch = matchfn(inckinds, '(?:/|$)')
368 incmatch = matchfn(inckinds, '(?:/|$)')
369 excmatch = lambda fn: False
369 excmatch = lambda fn: False
370 if exc:
370 if exc:
371 exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
371 exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
372 excmatch = matchfn(exckinds, '(?:/|$)')
372 excmatch = matchfn(exckinds, '(?:/|$)')
373
373
374 return (roots,
374 return (roots,
375 lambda fn: (incmatch(fn) and not excmatch(fn) and
375 lambda fn: (incmatch(fn) and not excmatch(fn) and
376 (fn.endswith('/') or
376 (fn.endswith('/') or
377 (not pats and not files) or
377 (not pats and not files) or
378 (pats and patmatch(fn)) or
378 (pats and patmatch(fn)) or
379 (files and filematch(fn)))),
379 (files and filematch(fn)))),
380 (inc or exc or (pats and pats != [('glob', '**')])) and True)
380 (inc or exc or (pats and pats != [('glob', '**')])) and True)
381
381
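A hypothetical driving example for the matcher machinery above, roughly the way command code uses it; '/repo' is a made-up canonical root and the mercurial.util import path is assumed.

from mercurial.util import matcher   # assumed import path

roots, match, anypats = matcher('/repo', cwd='', names=['glob:src/*.py'])
print(roots)                       # ['src'] - the non-glob prefix, used to prune walks
print(bool(match('src/foo.py')))   # True
print(bool(match('docs/x.txt')))   # False
print(anypats)                     # True - a real pattern was supplied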
382 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
382 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
383 '''enhanced shell command execution.
383 '''enhanced shell command execution.
384 run with environment maybe modified, maybe in different dir.
384 run with environment maybe modified, maybe in different dir.
385
385
386 if command fails and onerr is None, return status. if ui object,
386 if command fails and onerr is None, return status. if ui object,
387 print error message and return status, else raise onerr object as
387 print error message and return status, else raise onerr object as
388 exception.'''
388 exception.'''
389 def py2shell(val):
389 def py2shell(val):
390 'convert python object into string that is useful to shell'
390 'convert python object into string that is useful to shell'
391 if val in (None, False):
391 if val in (None, False):
392 return '0'
392 return '0'
393 if val == True:
393 if val == True:
394 return '1'
394 return '1'
395 return str(val)
395 return str(val)
396 oldenv = {}
396 oldenv = {}
397 for k in environ:
397 for k in environ:
398 oldenv[k] = os.environ.get(k)
398 oldenv[k] = os.environ.get(k)
399 if cwd is not None:
399 if cwd is not None:
400 oldcwd = os.getcwd()
400 oldcwd = os.getcwd()
401 try:
401 try:
402 for k, v in environ.iteritems():
402 for k, v in environ.iteritems():
403 os.environ[k] = py2shell(v)
403 os.environ[k] = py2shell(v)
404 if cwd is not None and oldcwd != cwd:
404 if cwd is not None and oldcwd != cwd:
405 os.chdir(cwd)
405 os.chdir(cwd)
406 rc = os.system(cmd)
406 rc = os.system(cmd)
407 if rc and onerr:
407 if rc and onerr:
408 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
408 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
409 explain_exit(rc)[0])
409 explain_exit(rc)[0])
410 if errprefix:
410 if errprefix:
411 errmsg = '%s: %s' % (errprefix, errmsg)
411 errmsg = '%s: %s' % (errprefix, errmsg)
412 try:
412 try:
413 onerr.warn(errmsg + '\n')
413 onerr.warn(errmsg + '\n')
414 except AttributeError:
414 except AttributeError:
415 raise onerr(errmsg)
415 raise onerr(errmsg)
416 return rc
416 return rc
417 finally:
417 finally:
418 for k, v in oldenv.iteritems():
418 for k, v in oldenv.iteritems():
419 if v is None:
419 if v is None:
420 del os.environ[k]
420 del os.environ[k]
421 else:
421 else:
422 os.environ[k] = v
422 os.environ[k] = v
423 if cwd is not None and oldcwd != cwd:
423 if cwd is not None and oldcwd != cwd:
424 os.chdir(oldcwd)
424 os.chdir(oldcwd)
425
425
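A hedged usage sketch of the helper above: run a command with an extra environment variable and a different working directory, reporting failures through a ui object instead of raising. The mercurial import paths and the ui.ui() construction are assumptions.

from mercurial import ui, util   # assumed import paths

u = ui.ui()
rc = util.system('echo flags=$HGFLAGS in $PWD',
                 environ={'HGFLAGS': '-v'}, cwd='/tmp',
                 onerr=u, errprefix='build failed')
print(rc)   # 0 on success; on failure u.warn() is called with the prefix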
426 def rename(src, dst):
426 def rename(src, dst):
427 """forcibly rename a file"""
427 """forcibly rename a file"""
428 try:
428 try:
429 os.rename(src, dst)
429 os.rename(src, dst)
430 except OSError, err:
430 except OSError, err:
431 # on windows, rename to existing file is not allowed, so we
431 # on windows, rename to existing file is not allowed, so we
432 # must delete destination first. but if file is open, unlink
432 # must delete destination first. but if file is open, unlink
433 # schedules it for delete but does not delete it. rename
433 # schedules it for delete but does not delete it. rename
434 # happens immediately even for open files, so we create
434 # happens immediately even for open files, so we create
435 # temporary file, delete it, rename destination to that name,
435 # temporary file, delete it, rename destination to that name,
436 # then delete that. then rename is safe to do.
436 # then delete that. then rename is safe to do.
437 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
437 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
438 os.close(fd)
438 os.close(fd)
439 os.unlink(temp)
439 os.unlink(temp)
440 os.rename(dst, temp)
440 os.rename(dst, temp)
441 os.unlink(temp)
441 os.unlink(temp)
442 os.rename(src, dst)
442 os.rename(src, dst)
443
443
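An illustrative check, not from the source, that rename() above overwrites an existing destination, which plain os.rename refuses to do on Windows; the import path is assumed.

import os, tempfile
from mercurial.util import rename   # assumed import path

d = tempfile.mkdtemp()
src, dst = os.path.join(d, 'new'), os.path.join(d, 'old')
open(src, 'w').write('fresh contents')
open(dst, 'w').write('stale contents')
rename(src, dst)
print(open(dst).read())      # -> 'fresh contents'
print(os.path.exists(src))   # -> False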
444 def unlink(f):
444 def unlink(f):
445 """unlink and remove the directory if it is empty"""
445 """unlink and remove the directory if it is empty"""
446 os.unlink(f)
446 os.unlink(f)
447 # try removing directories that might now be empty
447 # try removing directories that might now be empty
448 try:
448 try:
449 os.removedirs(os.path.dirname(f))
449 os.removedirs(os.path.dirname(f))
450 except OSError:
450 except OSError:
451 pass
451 pass
452
452
453 def copyfiles(src, dst, hardlink=None):
453 def copyfiles(src, dst, hardlink=None):
454 """Copy a directory tree using hardlinks if possible"""
454 """Copy a directory tree using hardlinks if possible"""
455
455
456 if hardlink is None:
456 if hardlink is None:
457 hardlink = (os.stat(src).st_dev ==
457 hardlink = (os.stat(src).st_dev ==
458 os.stat(os.path.dirname(dst)).st_dev)
458 os.stat(os.path.dirname(dst)).st_dev)
459
459
460 if os.path.isdir(src):
460 if os.path.isdir(src):
461 os.mkdir(dst)
461 os.mkdir(dst)
462 for name in os.listdir(src):
462 for name in os.listdir(src):
463 srcname = os.path.join(src, name)
463 srcname = os.path.join(src, name)
464 dstname = os.path.join(dst, name)
464 dstname = os.path.join(dst, name)
465 copyfiles(srcname, dstname, hardlink)
465 copyfiles(srcname, dstname, hardlink)
466 else:
466 else:
467 if hardlink:
467 if hardlink:
468 try:
468 try:
469 os_link(src, dst)
469 os_link(src, dst)
470 except (IOError, OSError):
470 except (IOError, OSError):
471 hardlink = False
471 hardlink = False
472 shutil.copy(src, dst)
472 shutil.copy(src, dst)
473 else:
473 else:
474 shutil.copy(src, dst)
474 shutil.copy(src, dst)
475
475
476 def audit_path(path):
476 def audit_path(path):
477 """Abort if path contains dangerous components"""
477 """Abort if path contains dangerous components"""
478 parts = os.path.normcase(path).split(os.sep)
478 parts = os.path.normcase(path).split(os.sep)
479 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
479 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
480 or os.pardir in parts):
480 or os.pardir in parts):
481 raise Abort(_("path contains illegal component: %s\n") % path)
481 raise Abort(_("path contains illegal component: %s\n") % path)
482
482
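A hypothetical sketch of the checks above; it assumes Abort lives in this module and that mercurial.util is the import path.

from mercurial.util import audit_path, Abort   # assumed import path

audit_path('src/module.py')   # fine: a plain path inside the working directory
for bad in ('.hg/hgrc', '../escape', '/etc/passwd'):
    try:
        audit_path(bad)
    except Abort:
        print('rejected %s' % bad)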
483 def _makelock_file(info, pathname):
483 def _makelock_file(info, pathname):
484 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
484 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
485 os.write(ld, info)
485 os.write(ld, info)
486 os.close(ld)
486 os.close(ld)
487
487
488 def _readlock_file(pathname):
488 def _readlock_file(pathname):
489 return posixfile(pathname).read()
489 return posixfile(pathname).read()
490
490
491 def nlinks(pathname):
491 def nlinks(pathname):
492 """Return number of hardlinks for the given file."""
492 """Return number of hardlinks for the given file."""
493 return os.lstat(pathname).st_nlink
493 return os.lstat(pathname).st_nlink
494
494
495 if hasattr(os, 'link'):
495 if hasattr(os, 'link'):
496 os_link = os.link
496 os_link = os.link
497 else:
497 else:
498 def os_link(src, dst):
498 def os_link(src, dst):
499 raise OSError(0, _("Hardlinks not supported"))
499 raise OSError(0, _("Hardlinks not supported"))
500
500
501 def fstat(fp):
501 def fstat(fp):
502 '''stat file object that may not have fileno method.'''
502 '''stat file object that may not have fileno method.'''
503 try:
503 try:
504 return os.fstat(fp.fileno())
504 return os.fstat(fp.fileno())
505 except AttributeError:
505 except AttributeError:
506 return os.stat(fp.name)
506 return os.stat(fp.name)
507
507
508 posixfile = file
508 posixfile = file
509
509
510 def is_win_9x():
510 def is_win_9x():
511 '''return true if run on windows 95, 98 or me.'''
511 '''return true if run on windows 95, 98 or me.'''
512 try:
512 try:
513 return sys.getwindowsversion()[3] == 1
513 return sys.getwindowsversion()[3] == 1
514 except AttributeError:
514 except AttributeError:
515 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
515 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
516
516
517 getuser_fallback = None
517 getuser_fallback = None
518
518
519 def getuser():
519 def getuser():
520 '''return name of current user'''
520 '''return name of current user'''
521 try:
521 try:
522 return getpass.getuser()
522 return getpass.getuser()
523 except ImportError:
523 except ImportError:
524 # import of pwd will fail on windows - try fallback
524 # import of pwd will fail on windows - try fallback
525 if getuser_fallback:
525 if getuser_fallback:
526 return getuser_fallback()
526 return getuser_fallback()
527 # raised if win32api not available
527 # raised if win32api not available
528 raise Abort(_('user name not available - set USERNAME '
528 raise Abort(_('user name not available - set USERNAME '
529 'environment variable'))
529 'environment variable'))
530
530
531 # Platform specific variants
531 # Platform specific variants
532 if os.name == 'nt':
532 if os.name == 'nt':
533 demandload(globals(), "msvcrt")
533 demandload(globals(), "msvcrt")
534 nulldev = 'NUL:'
534 nulldev = 'NUL:'
535
535
536 class winstdout:
536 class winstdout:
537 '''stdout on windows misbehaves if sent through a pipe'''
537 '''stdout on windows misbehaves if sent through a pipe'''
538
538
539 def __init__(self, fp):
539 def __init__(self, fp):
540 self.fp = fp
540 self.fp = fp
541
541
542 def __getattr__(self, key):
542 def __getattr__(self, key):
543 return getattr(self.fp, key)
543 return getattr(self.fp, key)
544
544
545 def close(self):
545 def close(self):
546 try:
546 try:
547 self.fp.close()
547 self.fp.close()
548 except: pass
548 except: pass
549
549
550 def write(self, s):
550 def write(self, s):
551 try:
551 try:
552 return self.fp.write(s)
552 return self.fp.write(s)
553 except IOError, inst:
553 except IOError, inst:
554 if inst.errno != 0: raise
554 if inst.errno != 0: raise
555 self.close()
555 self.close()
556 raise IOError(errno.EPIPE, 'Broken pipe')
556 raise IOError(errno.EPIPE, 'Broken pipe')
557
557
558 sys.stdout = winstdout(sys.stdout)
558 sys.stdout = winstdout(sys.stdout)
559
559
560 def system_rcpath():
560 def system_rcpath():
561 try:
561 try:
562 return system_rcpath_win32()
562 return system_rcpath_win32()
563 except:
563 except:
564 return [r'c:\mercurial\mercurial.ini']
564 return [r'c:\mercurial\mercurial.ini']
565
565
566 def os_rcpath():
566 def os_rcpath():
567 '''return default os-specific hgrc search path'''
567 '''return default os-specific hgrc search path'''
568 path = system_rcpath()
568 path = system_rcpath()
569 path.append(user_rcpath())
569 path.append(user_rcpath())
570 userprofile = os.environ.get('USERPROFILE')
570 userprofile = os.environ.get('USERPROFILE')
571 if userprofile:
571 if userprofile:
572 path.append(os.path.join(userprofile, 'mercurial.ini'))
572 path.append(os.path.join(userprofile, 'mercurial.ini'))
573 return path
573 return path
574
574
575 def user_rcpath():
575 def user_rcpath():
576 '''return os-specific hgrc search path to the user dir'''
576 '''return os-specific hgrc search path to the user dir'''
577 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
577 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
578
578
579 def parse_patch_output(output_line):
579 def parse_patch_output(output_line):
580 """parses the output produced by patch and returns the file name"""
580 """parses the output produced by patch and returns the file name"""
581 pf = output_line[14:]
581 pf = output_line[14:]
582 if pf[0] == '`':
582 if pf[0] == '`':
583 pf = pf[1:-1] # Remove the quotes
583 pf = pf[1:-1] # Remove the quotes
584 return pf
584 return pf
585
585
586 def testpid(pid):
586 def testpid(pid):
587 '''return False if pid dead, True if running or not known'''
587 '''return False if pid dead, True if running or not known'''
588 return True
588 return True
589
589
590 def is_exec(f, last):
590 def is_exec(f, last):
591 return last
591 return last
592
592
593 def set_exec(f, mode):
593 def set_exec(f, mode):
594 pass
594 pass
595
595
596 def set_binary(fd):
596 def set_binary(fd):
597 msvcrt.setmode(fd.fileno(), os.O_BINARY)
597 msvcrt.setmode(fd.fileno(), os.O_BINARY)
598
598
599 def pconvert(path):
599 def pconvert(path):
600 return path.replace("\\", "/")
600 return path.replace("\\", "/")
601
601
602 def localpath(path):
602 def localpath(path):
603 return path.replace('/', '\\')
603 return path.replace('/', '\\')
604
604
605 def normpath(path):
605 def normpath(path):
606 return pconvert(os.path.normpath(path))
606 return pconvert(os.path.normpath(path))
607
607
608 makelock = _makelock_file
608 makelock = _makelock_file
609 readlock = _readlock_file
609 readlock = _readlock_file
610
610
611 def samestat(s1, s2):
611 def samestat(s1, s2):
612 return False
612 return False
613
613
614 def shellquote(s):
615 return '"%s"' % s.replace('"', '\\"')
616
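An illustration, not part of the changeset, of the Windows-side rule just added: wrap the string in double quotes and backslash-escape any embedded double quotes, a common convention for quoting command-line arguments on Windows. The function is restated so the snippet runs standalone.

def shellquote(s):              # same rule as the function above
    return '"%s"' % s.replace('"', '\\"')

print(shellquote(r'C:\Program Files\Mercurial'))   # -> "C:\Program Files\Mercurial"
print(shellquote('say "hi"'))                      # -> "say \"hi\""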
614 def explain_exit(code):
617 def explain_exit(code):
615 return _("exited with status %d") % code, code
618 return _("exited with status %d") % code, code
616
619
617 try:
620 try:
618 # override functions with win32 versions if possible
621 # override functions with win32 versions if possible
619 from util_win32 import *
622 from util_win32 import *
620 if not is_win_9x():
623 if not is_win_9x():
621 posixfile = posixfile_nt
624 posixfile = posixfile_nt
622 except ImportError:
625 except ImportError:
623 pass
626 pass
624
627
625 else:
628 else:
626 nulldev = '/dev/null'
629 nulldev = '/dev/null'
627
630
628 def rcfiles(path):
631 def rcfiles(path):
629 rcs = [os.path.join(path, 'hgrc')]
632 rcs = [os.path.join(path, 'hgrc')]
630 rcdir = os.path.join(path, 'hgrc.d')
633 rcdir = os.path.join(path, 'hgrc.d')
631 try:
634 try:
632 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
635 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
633 if f.endswith(".rc")])
636 if f.endswith(".rc")])
634 except OSError, inst: pass
637 except OSError, inst: pass
635 return rcs
638 return rcs
636
639
637 def os_rcpath():
640 def os_rcpath():
638 '''return default os-specific hgrc search path'''
641 '''return default os-specific hgrc search path'''
639 path = []
642 path = []
640 # old mod_python does not set sys.argv
643 # old mod_python does not set sys.argv
641 if len(getattr(sys, 'argv', [])) > 0:
644 if len(getattr(sys, 'argv', [])) > 0:
642 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
645 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
643 '/../etc/mercurial'))
646 '/../etc/mercurial'))
644 path.extend(rcfiles('/etc/mercurial'))
647 path.extend(rcfiles('/etc/mercurial'))
645 path.append(os.path.expanduser('~/.hgrc'))
648 path.append(os.path.expanduser('~/.hgrc'))
646 path = [os.path.normpath(f) for f in path]
649 path = [os.path.normpath(f) for f in path]
647 return path
650 return path
648
651
649 def parse_patch_output(output_line):
652 def parse_patch_output(output_line):
650 """parses the output produced by patch and returns the file name"""
653 """parses the output produced by patch and returns the file name"""
651 pf = output_line[14:]
654 pf = output_line[14:]
652 if pf.startswith("'") and pf.endswith("'") and " " in pf:
655 if pf.startswith("'") and pf.endswith("'") and " " in pf:
653 pf = pf[1:-1] # Remove the quotes
656 pf = pf[1:-1] # Remove the quotes
654 return pf
657 return pf
655
658
656 def is_exec(f, last):
659 def is_exec(f, last):
657 """check whether a file is executable"""
660 """check whether a file is executable"""
658 return (os.lstat(f).st_mode & 0100 != 0)
661 return (os.lstat(f).st_mode & 0100 != 0)
659
662
660 def set_exec(f, mode):
663 def set_exec(f, mode):
661 s = os.lstat(f).st_mode
664 s = os.lstat(f).st_mode
662 if (s & 0100 != 0) == mode:
665 if (s & 0100 != 0) == mode:
663 return
666 return
664 if mode:
667 if mode:
665 # Turn on +x for every +r bit when making a file executable
668 # Turn on +x for every +r bit when making a file executable
666 # and obey umask.
669 # and obey umask.
667 umask = os.umask(0)
670 umask = os.umask(0)
668 os.umask(umask)
671 os.umask(umask)
669 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
672 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
670 else:
673 else:
671 os.chmod(f, s & 0666)
674 os.chmod(f, s & 0666)
672
675
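A hedged round-trip example for the two helpers above on a POSIX system; the import path is assumed.

import os, tempfile
from mercurial.util import is_exec, set_exec   # assumed import path

fd, path = tempfile.mkstemp()
os.close(fd)
set_exec(path, True)          # adds +x for every +r bit, honouring the umask
print(is_exec(path, False))   # -> True  (the second argument is ignored here)
set_exec(path, False)
print(is_exec(path, False))   # -> False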
673 def set_binary(fd):
676 def set_binary(fd):
674 pass
677 pass
675
678
676 def pconvert(path):
679 def pconvert(path):
677 return path
680 return path
678
681
679 def localpath(path):
682 def localpath(path):
680 return path
683 return path
681
684
682 normpath = os.path.normpath
685 normpath = os.path.normpath
683 samestat = os.path.samestat
686 samestat = os.path.samestat
684
687
685 def makelock(info, pathname):
688 def makelock(info, pathname):
686 try:
689 try:
687 os.symlink(info, pathname)
690 os.symlink(info, pathname)
688 except OSError, why:
691 except OSError, why:
689 if why.errno == errno.EEXIST:
692 if why.errno == errno.EEXIST:
690 raise
693 raise
691 else:
694 else:
692 _makelock_file(info, pathname)
695 _makelock_file(info, pathname)
693
696
694 def readlock(pathname):
697 def readlock(pathname):
695 try:
698 try:
696 return os.readlink(pathname)
699 return os.readlink(pathname)
697 except OSError, why:
700 except OSError, why:
698 if why.errno == errno.EINVAL:
701 if why.errno == errno.EINVAL:
699 return _readlock_file(pathname)
702 return _readlock_file(pathname)
700 else:
703 else:
701 raise
704 raise
702
705
706 def shellquote(s):
707 return "'%s'" % s.replace("'", "'\\''")
708
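A demonstration, not part of the changeset, of why the quoting just added is safe for POSIX shells: every embedded single quote is rewritten as '\'' (close the quote, emit a literal quote, reopen), so nothing inside the string is ever expanded. The function is restated so the snippet runs standalone.

import os

def shellquote(s):   # same rule as the function above
    return "'%s'" % s.replace("'", "'\\''")

tricky = 'it\'s "quoted" & has $HOME and `backticks`'
assert os.popen('printf %s ' + shellquote(tricky)).read() == tricky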
703 def testpid(pid):
709 def testpid(pid):
704 '''return False if pid dead, True if running or not sure'''
710 '''return False if pid dead, True if running or not sure'''
705 try:
711 try:
706 os.kill(pid, 0)
712 os.kill(pid, 0)
707 return True
713 return True
708 except OSError, inst:
714 except OSError, inst:
709 return inst.errno != errno.ESRCH
715 return inst.errno != errno.ESRCH
710
716
711 def explain_exit(code):
717 def explain_exit(code):
712 """return a 2-tuple (desc, code) describing a process's status"""
718 """return a 2-tuple (desc, code) describing a process's status"""
713 if os.WIFEXITED(code):
719 if os.WIFEXITED(code):
714 val = os.WEXITSTATUS(code)
720 val = os.WEXITSTATUS(code)
715 return _("exited with status %d") % val, val
721 return _("exited with status %d") % val, val
716 elif os.WIFSIGNALED(code):
722 elif os.WIFSIGNALED(code):
717 val = os.WTERMSIG(code)
723 val = os.WTERMSIG(code)
718 return _("killed by signal %d") % val, val
724 return _("killed by signal %d") % val, val
719 elif os.WIFSTOPPED(code):
725 elif os.WIFSTOPPED(code):
720 val = os.WSTOPSIG(code)
726 val = os.WSTOPSIG(code)
721 return _("stopped by signal %d") % val, val
727 return _("stopped by signal %d") % val, val
722 raise ValueError(_("invalid exit code"))
728 raise ValueError(_("invalid exit code"))
723
729
724 def opener(base, audit=True):
730 def opener(base, audit=True):
725 """
731 """
726 return a function that opens files relative to base
732 return a function that opens files relative to base
727
733
728 this function is used to hide the details of COW semantics and
734 this function is used to hide the details of COW semantics and
729 remote file access from higher level code.
735 remote file access from higher level code.
730 """
736 """
731 p = base
737 p = base
732 audit_p = audit
738 audit_p = audit
733
739
734 def mktempcopy(name):
740 def mktempcopy(name):
735 d, fn = os.path.split(name)
741 d, fn = os.path.split(name)
736 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
742 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
737 os.close(fd)
743 os.close(fd)
738 ofp = posixfile(temp, "wb")
744 ofp = posixfile(temp, "wb")
739 try:
745 try:
740 try:
746 try:
741 ifp = posixfile(name, "rb")
747 ifp = posixfile(name, "rb")
742 except IOError, inst:
748 except IOError, inst:
743 if not getattr(inst, 'filename', None):
749 if not getattr(inst, 'filename', None):
744 inst.filename = name
750 inst.filename = name
745 raise
751 raise
746 for chunk in filechunkiter(ifp):
752 for chunk in filechunkiter(ifp):
747 ofp.write(chunk)
753 ofp.write(chunk)
748 ifp.close()
754 ifp.close()
749 ofp.close()
755 ofp.close()
750 except:
756 except:
751 try: os.unlink(temp)
757 try: os.unlink(temp)
752 except: pass
758 except: pass
753 raise
759 raise
754 st = os.lstat(name)
760 st = os.lstat(name)
755 os.chmod(temp, st.st_mode)
761 os.chmod(temp, st.st_mode)
756 return temp
762 return temp
757
763
758 class atomictempfile(posixfile):
764 class atomictempfile(posixfile):
759 """the file will only be copied when rename is called"""
765 """the file will only be copied when rename is called"""
760 def __init__(self, name, mode):
766 def __init__(self, name, mode):
761 self.__name = name
767 self.__name = name
762 self.temp = mktempcopy(name)
768 self.temp = mktempcopy(name)
763 posixfile.__init__(self, self.temp, mode)
769 posixfile.__init__(self, self.temp, mode)
764 def rename(self):
770 def rename(self):
765 if not self.closed:
771 if not self.closed:
766 posixfile.close(self)
772 posixfile.close(self)
767 rename(self.temp, localpath(self.__name))
773 rename(self.temp, localpath(self.__name))
768 def __del__(self):
774 def __del__(self):
769 if not self.closed:
775 if not self.closed:
770 try:
776 try:
771 os.unlink(self.temp)
777 os.unlink(self.temp)
772 except: pass
778 except: pass
773 posixfile.close(self)
779 posixfile.close(self)
774
780
775 class atomicfile(atomictempfile):
781 class atomicfile(atomictempfile):
776 """the file will only be copied on close"""
782 """the file will only be copied on close"""
777 def __init__(self, name, mode):
783 def __init__(self, name, mode):
778 atomictempfile.__init__(self, name, mode)
784 atomictempfile.__init__(self, name, mode)
779 def close(self):
785 def close(self):
780 self.rename()
786 self.rename()
781 def __del__(self):
787 def __del__(self):
782 self.rename()
788 self.rename()
783
789
784 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
790 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
785 if audit_p:
791 if audit_p:
786 audit_path(path)
792 audit_path(path)
787 f = os.path.join(p, path)
793 f = os.path.join(p, path)
788
794
789 if not text:
795 if not text:
790 mode += "b" # for that other OS
796 mode += "b" # for that other OS
791
797
792 if mode[0] != "r":
798 if mode[0] != "r":
793 try:
799 try:
794 nlink = nlinks(f)
800 nlink = nlinks(f)
795 except OSError:
801 except OSError:
796 d = os.path.dirname(f)
802 d = os.path.dirname(f)
797 if not os.path.isdir(d):
803 if not os.path.isdir(d):
798 os.makedirs(d)
804 os.makedirs(d)
799 else:
805 else:
800 if atomic:
806 if atomic:
801 return atomicfile(f, mode)
807 return atomicfile(f, mode)
802 elif atomictemp:
808 elif atomictemp:
803 return atomictempfile(f, mode)
809 return atomictempfile(f, mode)
804 if nlink > 1:
810 if nlink > 1:
805 rename(mktempcopy(f), f)
811 rename(mktempcopy(f), f)
806 return posixfile(f, mode)
812 return posixfile(f, mode)
807
813
808 return o
814 return o
809
815
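A hypothetical usage sketch of the opener factory above; the import path is assumed. Every path handed to the returned function is audited and resolved relative to the base, and parent directories are created on demand for writes.

import tempfile
from mercurial.util import opener   # assumed import path

base = tempfile.mkdtemp()
op = opener(base)
f = op('store/data.txt', 'w')        # 'store/' is created automatically
f.write('hello\n')
f.close()
print(op('store/data.txt').read())   # -> 'hello\n'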
810 class chunkbuffer(object):
816 class chunkbuffer(object):
811 """Allow arbitrary sized chunks of data to be efficiently read from an
817 """Allow arbitrary sized chunks of data to be efficiently read from an
812 iterator over chunks of arbitrary size."""
818 iterator over chunks of arbitrary size."""
813
819
814 def __init__(self, in_iter, targetsize = 2**16):
820 def __init__(self, in_iter, targetsize = 2**16):
815 """in_iter is the iterator that's iterating over the input chunks.
821 """in_iter is the iterator that's iterating over the input chunks.
816 targetsize is how big a buffer to try to maintain."""
822 targetsize is how big a buffer to try to maintain."""
817 self.in_iter = iter(in_iter)
823 self.in_iter = iter(in_iter)
818 self.buf = ''
824 self.buf = ''
819 self.targetsize = int(targetsize)
825 self.targetsize = int(targetsize)
820 if self.targetsize <= 0:
826 if self.targetsize <= 0:
821 raise ValueError(_("targetsize must be greater than 0, was %d") %
827 raise ValueError(_("targetsize must be greater than 0, was %d") %
822 targetsize)
828 targetsize)
823 self.iterempty = False
829 self.iterempty = False
824
830
825 def fillbuf(self):
831 def fillbuf(self):
826 """Ignore target size; read every chunk from iterator until empty."""
832 """Ignore target size; read every chunk from iterator until empty."""
827 if not self.iterempty:
833 if not self.iterempty:
828 collector = cStringIO.StringIO()
834 collector = cStringIO.StringIO()
829 collector.write(self.buf)
835 collector.write(self.buf)
830 for ch in self.in_iter:
836 for ch in self.in_iter:
831 collector.write(ch)
837 collector.write(ch)
832 self.buf = collector.getvalue()
838 self.buf = collector.getvalue()
833 self.iterempty = True
839 self.iterempty = True
834
840
835 def read(self, l):
841 def read(self, l):
836 """Read L bytes of data from the iterator of chunks of data.
842 """Read L bytes of data from the iterator of chunks of data.
837 Returns less than L bytes if the iterator runs dry."""
843 Returns less than L bytes if the iterator runs dry."""
838 if l > len(self.buf) and not self.iterempty:
844 if l > len(self.buf) and not self.iterempty:
839 # Clamp to a multiple of self.targetsize
845 # Clamp to a multiple of self.targetsize
840 targetsize = self.targetsize * ((l // self.targetsize) + 1)
846 targetsize = self.targetsize * ((l // self.targetsize) + 1)
841 collector = cStringIO.StringIO()
847 collector = cStringIO.StringIO()
842 collector.write(self.buf)
848 collector.write(self.buf)
843 collected = len(self.buf)
849 collected = len(self.buf)
844 for chunk in self.in_iter:
850 for chunk in self.in_iter:
845 collector.write(chunk)
851 collector.write(chunk)
846 collected += len(chunk)
852 collected += len(chunk)
847 if collected >= targetsize:
853 if collected >= targetsize:
848 break
854 break
849 if collected < targetsize:
855 if collected < targetsize:
850 self.iterempty = True
856 self.iterempty = True
851 self.buf = collector.getvalue()
857 self.buf = collector.getvalue()
852 s, self.buf = self.buf[:l], buffer(self.buf, l)
858 s, self.buf = self.buf[:l], buffer(self.buf, l)
853 return s
859 return s
854
860
855 def filechunkiter(f, size=65536, limit=None):
861 def filechunkiter(f, size=65536, limit=None):
856 """Create a generator that produces the data in the file size
862 """Create a generator that produces the data in the file size
857 (default 65536) bytes at a time, up to optional limit (default is
863 (default 65536) bytes at a time, up to optional limit (default is
858 to read all data). Chunks may be less than size bytes if the
864 to read all data). Chunks may be less than size bytes if the
859 chunk is the last chunk in the file, or the file is a socket or
865 chunk is the last chunk in the file, or the file is a socket or
860 some other type of file that sometimes reads less data than is
866 some other type of file that sometimes reads less data than is
861 requested."""
867 requested."""
862 assert size >= 0
868 assert size >= 0
863 assert limit is None or limit >= 0
869 assert limit is None or limit >= 0
864 while True:
870 while True:
865 if limit is None: nbytes = size
871 if limit is None: nbytes = size
866 else: nbytes = min(limit, size)
872 else: nbytes = min(limit, size)
867 s = nbytes and f.read(nbytes)
873 s = nbytes and f.read(nbytes)
868 if not s: break
874 if not s: break
869 if limit: limit -= len(s)
875 if limit: limit -= len(s)
870 yield s
876 yield s
871
877
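A hedged sketch combining the two helpers above: re-chunk an arbitrary stream of pieces into reads of whatever size the caller asks for. cStringIO matches the Python 2 idiom of this module; the import path is assumed.

import cStringIO
from mercurial.util import chunkbuffer, filechunkiter   # assumed import path

src = cStringIO.StringIO('x' * 10000)
buf = chunkbuffer(filechunkiter(src, size=1000))   # input arrives in 1000-byte pieces
print(len(buf.read(2500)))    # -> 2500, independent of the input chunking
print(len(buf.read(10000)))   # -> 7500, a short read once the iterator runs dry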
872 def makedate():
878 def makedate():
873 lt = time.localtime()
879 lt = time.localtime()
874 if lt[8] == 1 and time.daylight:
880 if lt[8] == 1 and time.daylight:
875 tz = time.altzone
881 tz = time.altzone
876 else:
882 else:
877 tz = time.timezone
883 tz = time.timezone
878 return time.mktime(lt), tz
884 return time.mktime(lt), tz
879
885
880 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
886 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
881 """represent a (unixtime, offset) tuple as a localized time.
887 """represent a (unixtime, offset) tuple as a localized time.
882 unixtime is seconds since the epoch, and offset is the time zone's
888 unixtime is seconds since the epoch, and offset is the time zone's
883 number of seconds away from UTC. if timezone is false, do not
889 number of seconds away from UTC. if timezone is false, do not
884 append time zone to string."""
890 append time zone to string."""
885 t, tz = date or makedate()
891 t, tz = date or makedate()
886 s = time.strftime(format, time.gmtime(float(t) - tz))
892 s = time.strftime(format, time.gmtime(float(t) - tz))
887 if timezone:
893 if timezone:
888 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
894 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
889 return s
895 return s
890
896
891 def strdate(string, format='%a %b %d %H:%M:%S %Y'):
897 def strdate(string, format='%a %b %d %H:%M:%S %Y'):
892 """parse a localized time string and return a (unixtime, offset) tuple.
898 """parse a localized time string and return a (unixtime, offset) tuple.
893 if the string cannot be parsed, ValueError is raised."""
899 if the string cannot be parsed, ValueError is raised."""
894 def hastimezone(string):
900 def hastimezone(string):
895 return (string[-4:].isdigit() and
901 return (string[-4:].isdigit() and
896 (string[-5] == '+' or string[-5] == '-') and
902 (string[-5] == '+' or string[-5] == '-') and
897 string[-6].isspace())
903 string[-6].isspace())
898
904
899 if hastimezone(string):
905 if hastimezone(string):
900 date, tz = string[:-6], string[-5:]
906 date, tz = string[:-6], string[-5:]
901 tz = int(tz)
907 tz = int(tz)
902 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
908 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
903 else:
909 else:
904 date, offset = string, 0
910 date, offset = string, 0
905 when = int(time.mktime(time.strptime(date, format))) + offset
911 when = int(time.mktime(time.strptime(date, format))) + offset
906 return when, offset
912 return when, offset
907
913
908 def parsedate(string, formats=None):
914 def parsedate(string, formats=None):
909 """parse a localized time string and return a (unixtime, offset) tuple.
915 """parse a localized time string and return a (unixtime, offset) tuple.
910 The date may be a "unixtime offset" string or in one of the specified
916 The date may be a "unixtime offset" string or in one of the specified
911 formats."""
917 formats."""
912 if not formats:
918 if not formats:
913 formats = defaultdateformats
919 formats = defaultdateformats
914 try:
920 try:
915 when, offset = map(int, string.split(' '))
921 when, offset = map(int, string.split(' '))
916 except ValueError:
922 except ValueError:
917 for format in formats:
923 for format in formats:
918 try:
924 try:
919 when, offset = strdate(string, format)
925 when, offset = strdate(string, format)
920 except ValueError:
926 except ValueError:
921 pass
927 pass
922 else:
928 else:
923 break
929 break
924 else:
930 else:
925 raise ValueError(_('invalid date: %r') % string)
931 raise ValueError(_('invalid date: %r') % string)
926 # validate explicit (probably user-specified) date and
932 # validate explicit (probably user-specified) date and
927 # time zone offset. values must fit in signed 32 bits for
933 # time zone offset. values must fit in signed 32 bits for
928 # current 32-bit linux runtimes. timezones go from UTC-12
934 # current 32-bit linux runtimes. timezones go from UTC-12
929 # to UTC+14
935 # to UTC+14
930 if abs(when) > 0x7fffffff:
936 if abs(when) > 0x7fffffff:
931 raise ValueError(_('date exceeds 32 bits: %d') % when)
937 raise ValueError(_('date exceeds 32 bits: %d') % when)
932 if offset < -50400 or offset > 43200:
938 if offset < -50400 or offset > 43200:
933 raise ValueError(_('impossible time zone offset: %d') % offset)
939 raise ValueError(_('impossible time zone offset: %d') % offset)
934 return when, offset
940 return when, offset
935
941
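Illustrative calls, not from the source, into the date helpers above; the import path is assumed and the first output assumes the C locale for day and month names.

from mercurial.util import datestr, parsedate   # assumed import path

print(datestr((0, 0)))             # -> 'Thu Jan 01 00:00:00 1970 +0000'
print(parsedate('1159732800 0'))   # plain "unixtime offset" -> (1159732800, 0)
# anything else is tried against defaultdateformats via strdate()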
936 def shortuser(user):
942 def shortuser(user):
937 """Return a short representation of a user name or email address."""
943 """Return a short representation of a user name or email address."""
938 f = user.find('@')
944 f = user.find('@')
939 if f >= 0:
945 if f >= 0:
940 user = user[:f]
946 user = user[:f]
941 f = user.find('<')
947 f = user.find('<')
942 if f >= 0:
948 if f >= 0:
943 user = user[f+1:]
949 user = user[f+1:]
944 return user
950 return user
945
951
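Two quick examples, not from the source, of the trimming above; the import path is assumed.

from mercurial.util import shortuser   # assumed import path

print(shortuser('John Doe <john@example.com>'))   # -> 'john'
print(shortuser('jane@example.org'))              # -> 'jane'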
946 def walkrepos(path):
952 def walkrepos(path):
947 '''yield every hg repository under path, recursively.'''
953 '''yield every hg repository under path, recursively.'''
948 def errhandler(err):
954 def errhandler(err):
949 if err.filename == path:
955 if err.filename == path:
950 raise err
956 raise err
951
957
952 for root, dirs, files in os.walk(path, onerror=errhandler):
958 for root, dirs, files in os.walk(path, onerror=errhandler):
953 for d in dirs:
959 for d in dirs:
954 if d == '.hg':
960 if d == '.hg':
955 yield root
961 yield root
956 dirs[:] = []
962 dirs[:] = []
957 break
963 break
958
964
959 _rcpath = None
965 _rcpath = None
960
966
961 def rcpath():
967 def rcpath():
962 '''return hgrc search path. if env var HGRCPATH is set, use it.
968 '''return hgrc search path. if env var HGRCPATH is set, use it.
963 for each item in path, if directory, use files ending in .rc,
969 for each item in path, if directory, use files ending in .rc,
964 else use item.
970 else use item.
965 make HGRCPATH empty to only look in .hg/hgrc of current repo.
971 make HGRCPATH empty to only look in .hg/hgrc of current repo.
966 if no HGRCPATH, use default os-specific path.'''
972 if no HGRCPATH, use default os-specific path.'''
967 global _rcpath
973 global _rcpath
968 if _rcpath is None:
974 if _rcpath is None:
969 if 'HGRCPATH' in os.environ:
975 if 'HGRCPATH' in os.environ:
970 _rcpath = []
976 _rcpath = []
971 for p in os.environ['HGRCPATH'].split(os.pathsep):
977 for p in os.environ['HGRCPATH'].split(os.pathsep):
972 if not p: continue
978 if not p: continue
973 if os.path.isdir(p):
979 if os.path.isdir(p):
974 for f in os.listdir(p):
980 for f in os.listdir(p):
975 if f.endswith('.rc'):
981 if f.endswith('.rc'):
976 _rcpath.append(os.path.join(p, f))
982 _rcpath.append(os.path.join(p, f))
977 else:
983 else:
978 _rcpath.append(p)
984 _rcpath.append(p)
979 else:
985 else:
980 _rcpath = os_rcpath()
986 _rcpath = os_rcpath()
981 return _rcpath
987 return _rcpath
982
988
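A hedged illustration of the HGRCPATH handling above; the import path is assumed. Note that the result is cached in the _rcpath global, so the variable has to be set before the first call.

import os
from mercurial.util import rcpath   # assumed import path

os.environ['HGRCPATH'] = ''   # empty: consult only the repository's .hg/hgrc
print(rcpath())               # -> []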
983 def bytecount(nbytes):
989 def bytecount(nbytes):
984 '''return byte count formatted as readable string, with units'''
990 '''return byte count formatted as readable string, with units'''
985
991
986 units = (
992 units = (
987 (100, 1<<30, _('%.0f GB')),
993 (100, 1<<30, _('%.0f GB')),
988 (10, 1<<30, _('%.1f GB')),
994 (10, 1<<30, _('%.1f GB')),
989 (1, 1<<30, _('%.2f GB')),
995 (1, 1<<30, _('%.2f GB')),
990 (100, 1<<20, _('%.0f MB')),
996 (100, 1<<20, _('%.0f MB')),
991 (10, 1<<20, _('%.1f MB')),
997 (10, 1<<20, _('%.1f MB')),
992 (1, 1<<20, _('%.2f MB')),
998 (1, 1<<20, _('%.2f MB')),
993 (100, 1<<10, _('%.0f KB')),
999 (100, 1<<10, _('%.0f KB')),
994 (10, 1<<10, _('%.1f KB')),
1000 (10, 1<<10, _('%.1f KB')),
995 (1, 1<<10, _('%.2f KB')),
1001 (1, 1<<10, _('%.2f KB')),
996 (1, 1, _('%.0f bytes')),
1002 (1, 1, _('%.0f bytes')),
997 )
1003 )
998
1004
999 for multiplier, divisor, format in units:
1005 for multiplier, divisor, format in units:
1000 if nbytes >= divisor * multiplier:
1006 if nbytes >= divisor * multiplier:
1001 return format % (nbytes / float(divisor))
1007 return format % (nbytes / float(divisor))
1002 return units[-1][2] % nbytes
1008 return units[-1][2] % nbytes
1003
1009
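A few sample values, not from the source, for the formatting table above; the import path is assumed.

from mercurial.util import bytecount   # assumed import path

print(bytecount(500))            # -> '500 bytes'
print(bytecount(2560))           # -> '2.50 KB'
print(bytecount(150 * 1024))     # -> '150 KB'
print(bytecount(3 * (1 << 30)))  # -> '3.00 GB'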
1004 def drop_scheme(scheme, path):
1010 def drop_scheme(scheme, path):
1005 sc = scheme + ':'
1011 sc = scheme + ':'
1006 if path.startswith(sc):
1012 if path.startswith(sc):
1007 path = path[len(sc):]
1013 path = path[len(sc):]
1008 if path.startswith('//'):
1014 if path.startswith('//'):
1009 path = path[2:]
1015 path = path[2:]
1010 return path
1016 return path
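Illustrative calls, not from the source, for the scheme stripping above; the import path is assumed.

from mercurial.util import drop_scheme   # assumed import path

print(drop_scheme('file', 'file:///tmp/repo'))    # -> '/tmp/repo'
print(drop_scheme('file', 'file:relative/path'))  # -> 'relative/path'
print(drop_scheme('file', '/already/plain'))      # unchanged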