##// END OF EJS Templates
Merge with crew-stable
Alexis S. L. Carvalho -
r4134:9dc64c84 merge default
parent child Browse files
Show More
@@ -1,2219 +1,2222 b''
1 # queue.py - patch queues for mercurial
1 # queue.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 '''patch management and development
8 '''patch management and development
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details):
17 Common tasks (use "hg help command" for more details):
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25 print name of top applied patch qtop
25 print name of top applied patch qtop
26
26
27 add known patch to applied stack qpush
27 add known patch to applied stack qpush
28 remove patch from applied stack qpop
28 remove patch from applied stack qpop
29 refresh contents of top applied patch qrefresh
29 refresh contents of top applied patch qrefresh
30 '''
30 '''
31
31
32 from mercurial.i18n import _
32 from mercurial.i18n import _
33 from mercurial import commands, cmdutil, hg, patch, revlog, util, changegroup
33 from mercurial import commands, cmdutil, hg, patch, revlog, util, changegroup
34 import os, sys, re, errno
34 import os, sys, re, errno
35
35
36 commands.norepo += " qclone qversion"
36 commands.norepo += " qclone qversion"
37
37
38 # Patch names looks like unix-file names.
38 # Patch names looks like unix-file names.
39 # They must be joinable with queue directory and result in the patch path.
39 # They must be joinable with queue directory and result in the patch path.
40 normname = util.normpath
40 normname = util.normpath
41
41
42 class statusentry:
42 class statusentry:
43 def __init__(self, rev, name=None):
43 def __init__(self, rev, name=None):
44 if not name:
44 if not name:
45 fields = rev.split(':', 1)
45 fields = rev.split(':', 1)
46 if len(fields) == 2:
46 if len(fields) == 2:
47 self.rev, self.name = fields
47 self.rev, self.name = fields
48 else:
48 else:
49 self.rev, self.name = None, None
49 self.rev, self.name = None, None
50 else:
50 else:
51 self.rev, self.name = rev, name
51 self.rev, self.name = rev, name
52
52
53 def __str__(self):
53 def __str__(self):
54 return self.rev + ':' + self.name
54 return self.rev + ':' + self.name
55
55
56 class queue:
56 class queue:
57 def __init__(self, ui, path, patchdir=None):
57 def __init__(self, ui, path, patchdir=None):
58 self.basepath = path
58 self.basepath = path
59 self.path = patchdir or os.path.join(path, "patches")
59 self.path = patchdir or os.path.join(path, "patches")
60 self.opener = util.opener(self.path)
60 self.opener = util.opener(self.path)
61 self.ui = ui
61 self.ui = ui
62 self.applied = []
62 self.applied = []
63 self.full_series = []
63 self.full_series = []
64 self.applied_dirty = 0
64 self.applied_dirty = 0
65 self.series_dirty = 0
65 self.series_dirty = 0
66 self.series_path = "series"
66 self.series_path = "series"
67 self.status_path = "status"
67 self.status_path = "status"
68 self.guards_path = "guards"
68 self.guards_path = "guards"
69 self.active_guards = None
69 self.active_guards = None
70 self.guards_dirty = False
70 self.guards_dirty = False
71 self._diffopts = None
71 self._diffopts = None
72
72
73 if os.path.exists(self.join(self.series_path)):
73 if os.path.exists(self.join(self.series_path)):
74 self.full_series = self.opener(self.series_path).read().splitlines()
74 self.full_series = self.opener(self.series_path).read().splitlines()
75 self.parse_series()
75 self.parse_series()
76
76
77 if os.path.exists(self.join(self.status_path)):
77 if os.path.exists(self.join(self.status_path)):
78 lines = self.opener(self.status_path).read().splitlines()
78 lines = self.opener(self.status_path).read().splitlines()
79 self.applied = [statusentry(l) for l in lines]
79 self.applied = [statusentry(l) for l in lines]
80
80
81 def diffopts(self):
81 def diffopts(self):
82 if self._diffopts is None:
82 if self._diffopts is None:
83 self._diffopts = patch.diffopts(self.ui)
83 self._diffopts = patch.diffopts(self.ui)
84 return self._diffopts
84 return self._diffopts
85
85
86 def join(self, *p):
86 def join(self, *p):
87 return os.path.join(self.path, *p)
87 return os.path.join(self.path, *p)
88
88
89 def find_series(self, patch):
89 def find_series(self, patch):
90 pre = re.compile("(\s*)([^#]+)")
90 pre = re.compile("(\s*)([^#]+)")
91 index = 0
91 index = 0
92 for l in self.full_series:
92 for l in self.full_series:
93 m = pre.match(l)
93 m = pre.match(l)
94 if m:
94 if m:
95 s = m.group(2)
95 s = m.group(2)
96 s = s.rstrip()
96 s = s.rstrip()
97 if s == patch:
97 if s == patch:
98 return index
98 return index
99 index += 1
99 index += 1
100 return None
100 return None
101
101
102 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
102 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
103
103
104 def parse_series(self):
104 def parse_series(self):
105 self.series = []
105 self.series = []
106 self.series_guards = []
106 self.series_guards = []
107 for l in self.full_series:
107 for l in self.full_series:
108 h = l.find('#')
108 h = l.find('#')
109 if h == -1:
109 if h == -1:
110 patch = l
110 patch = l
111 comment = ''
111 comment = ''
112 elif h == 0:
112 elif h == 0:
113 continue
113 continue
114 else:
114 else:
115 patch = l[:h]
115 patch = l[:h]
116 comment = l[h:]
116 comment = l[h:]
117 patch = patch.strip()
117 patch = patch.strip()
118 if patch:
118 if patch:
119 if patch in self.series:
119 if patch in self.series:
120 raise util.Abort(_('%s appears more than once in %s') %
120 raise util.Abort(_('%s appears more than once in %s') %
121 (patch, self.join(self.series_path)))
121 (patch, self.join(self.series_path)))
122 self.series.append(patch)
122 self.series.append(patch)
123 self.series_guards.append(self.guard_re.findall(comment))
123 self.series_guards.append(self.guard_re.findall(comment))
124
124
125 def check_guard(self, guard):
125 def check_guard(self, guard):
126 bad_chars = '# \t\r\n\f'
126 bad_chars = '# \t\r\n\f'
127 first = guard[0]
127 first = guard[0]
128 for c in '-+':
128 for c in '-+':
129 if first == c:
129 if first == c:
130 return (_('guard %r starts with invalid character: %r') %
130 return (_('guard %r starts with invalid character: %r') %
131 (guard, c))
131 (guard, c))
132 for c in bad_chars:
132 for c in bad_chars:
133 if c in guard:
133 if c in guard:
134 return _('invalid character in guard %r: %r') % (guard, c)
134 return _('invalid character in guard %r: %r') % (guard, c)
135
135
136 def set_active(self, guards):
136 def set_active(self, guards):
137 for guard in guards:
137 for guard in guards:
138 bad = self.check_guard(guard)
138 bad = self.check_guard(guard)
139 if bad:
139 if bad:
140 raise util.Abort(bad)
140 raise util.Abort(bad)
141 guards = dict.fromkeys(guards).keys()
141 guards = dict.fromkeys(guards).keys()
142 guards.sort()
142 guards.sort()
143 self.ui.debug('active guards: %s\n' % ' '.join(guards))
143 self.ui.debug('active guards: %s\n' % ' '.join(guards))
144 self.active_guards = guards
144 self.active_guards = guards
145 self.guards_dirty = True
145 self.guards_dirty = True
146
146
147 def active(self):
147 def active(self):
148 if self.active_guards is None:
148 if self.active_guards is None:
149 self.active_guards = []
149 self.active_guards = []
150 try:
150 try:
151 guards = self.opener(self.guards_path).read().split()
151 guards = self.opener(self.guards_path).read().split()
152 except IOError, err:
152 except IOError, err:
153 if err.errno != errno.ENOENT: raise
153 if err.errno != errno.ENOENT: raise
154 guards = []
154 guards = []
155 for i, guard in enumerate(guards):
155 for i, guard in enumerate(guards):
156 bad = self.check_guard(guard)
156 bad = self.check_guard(guard)
157 if bad:
157 if bad:
158 self.ui.warn('%s:%d: %s\n' %
158 self.ui.warn('%s:%d: %s\n' %
159 (self.join(self.guards_path), i + 1, bad))
159 (self.join(self.guards_path), i + 1, bad))
160 else:
160 else:
161 self.active_guards.append(guard)
161 self.active_guards.append(guard)
162 return self.active_guards
162 return self.active_guards
163
163
164 def set_guards(self, idx, guards):
164 def set_guards(self, idx, guards):
165 for g in guards:
165 for g in guards:
166 if len(g) < 2:
166 if len(g) < 2:
167 raise util.Abort(_('guard %r too short') % g)
167 raise util.Abort(_('guard %r too short') % g)
168 if g[0] not in '-+':
168 if g[0] not in '-+':
169 raise util.Abort(_('guard %r starts with invalid char') % g)
169 raise util.Abort(_('guard %r starts with invalid char') % g)
170 bad = self.check_guard(g[1:])
170 bad = self.check_guard(g[1:])
171 if bad:
171 if bad:
172 raise util.Abort(bad)
172 raise util.Abort(bad)
173 drop = self.guard_re.sub('', self.full_series[idx])
173 drop = self.guard_re.sub('', self.full_series[idx])
174 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
174 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
175 self.parse_series()
175 self.parse_series()
176 self.series_dirty = True
176 self.series_dirty = True
177
177
178 def pushable(self, idx):
178 def pushable(self, idx):
179 if isinstance(idx, str):
179 if isinstance(idx, str):
180 idx = self.series.index(idx)
180 idx = self.series.index(idx)
181 patchguards = self.series_guards[idx]
181 patchguards = self.series_guards[idx]
182 if not patchguards:
182 if not patchguards:
183 return True, None
183 return True, None
184 default = False
184 default = False
185 guards = self.active()
185 guards = self.active()
186 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
186 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
187 if exactneg:
187 if exactneg:
188 return False, exactneg[0]
188 return False, exactneg[0]
189 pos = [g for g in patchguards if g[0] == '+']
189 pos = [g for g in patchguards if g[0] == '+']
190 exactpos = [g for g in pos if g[1:] in guards]
190 exactpos = [g for g in pos if g[1:] in guards]
191 if pos:
191 if pos:
192 if exactpos:
192 if exactpos:
193 return True, exactpos[0]
193 return True, exactpos[0]
194 return False, pos
194 return False, pos
195 return True, ''
195 return True, ''
196
196
197 def explain_pushable(self, idx, all_patches=False):
197 def explain_pushable(self, idx, all_patches=False):
198 write = all_patches and self.ui.write or self.ui.warn
198 write = all_patches and self.ui.write or self.ui.warn
199 if all_patches or self.ui.verbose:
199 if all_patches or self.ui.verbose:
200 if isinstance(idx, str):
200 if isinstance(idx, str):
201 idx = self.series.index(idx)
201 idx = self.series.index(idx)
202 pushable, why = self.pushable(idx)
202 pushable, why = self.pushable(idx)
203 if all_patches and pushable:
203 if all_patches and pushable:
204 if why is None:
204 if why is None:
205 write(_('allowing %s - no guards in effect\n') %
205 write(_('allowing %s - no guards in effect\n') %
206 self.series[idx])
206 self.series[idx])
207 else:
207 else:
208 if not why:
208 if not why:
209 write(_('allowing %s - no matching negative guards\n') %
209 write(_('allowing %s - no matching negative guards\n') %
210 self.series[idx])
210 self.series[idx])
211 else:
211 else:
212 write(_('allowing %s - guarded by %r\n') %
212 write(_('allowing %s - guarded by %r\n') %
213 (self.series[idx], why))
213 (self.series[idx], why))
214 if not pushable:
214 if not pushable:
215 if why:
215 if why:
216 write(_('skipping %s - guarded by %r\n') %
216 write(_('skipping %s - guarded by %r\n') %
217 (self.series[idx], why))
217 (self.series[idx], why))
218 else:
218 else:
219 write(_('skipping %s - no matching guards\n') %
219 write(_('skipping %s - no matching guards\n') %
220 self.series[idx])
220 self.series[idx])
221
221
222 def save_dirty(self):
222 def save_dirty(self):
223 def write_list(items, path):
223 def write_list(items, path):
224 fp = self.opener(path, 'w')
224 fp = self.opener(path, 'w')
225 for i in items:
225 for i in items:
226 print >> fp, i
226 print >> fp, i
227 fp.close()
227 fp.close()
228 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
228 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
229 if self.series_dirty: write_list(self.full_series, self.series_path)
229 if self.series_dirty: write_list(self.full_series, self.series_path)
230 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
230 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
231
231
232 def readheaders(self, patch):
232 def readheaders(self, patch):
233 def eatdiff(lines):
233 def eatdiff(lines):
234 while lines:
234 while lines:
235 l = lines[-1]
235 l = lines[-1]
236 if (l.startswith("diff -") or
236 if (l.startswith("diff -") or
237 l.startswith("Index:") or
237 l.startswith("Index:") or
238 l.startswith("===========")):
238 l.startswith("===========")):
239 del lines[-1]
239 del lines[-1]
240 else:
240 else:
241 break
241 break
242 def eatempty(lines):
242 def eatempty(lines):
243 while lines:
243 while lines:
244 l = lines[-1]
244 l = lines[-1]
245 if re.match('\s*$', l):
245 if re.match('\s*$', l):
246 del lines[-1]
246 del lines[-1]
247 else:
247 else:
248 break
248 break
249
249
250 pf = self.join(patch)
250 pf = self.join(patch)
251 message = []
251 message = []
252 comments = []
252 comments = []
253 user = None
253 user = None
254 date = None
254 date = None
255 format = None
255 format = None
256 subject = None
256 subject = None
257 diffstart = 0
257 diffstart = 0
258
258
259 for line in file(pf):
259 for line in file(pf):
260 line = line.rstrip()
260 line = line.rstrip()
261 if line.startswith('diff --git'):
261 if line.startswith('diff --git'):
262 diffstart = 2
262 diffstart = 2
263 break
263 break
264 if diffstart:
264 if diffstart:
265 if line.startswith('+++ '):
265 if line.startswith('+++ '):
266 diffstart = 2
266 diffstart = 2
267 break
267 break
268 if line.startswith("--- "):
268 if line.startswith("--- "):
269 diffstart = 1
269 diffstart = 1
270 continue
270 continue
271 elif format == "hgpatch":
271 elif format == "hgpatch":
272 # parse values when importing the result of an hg export
272 # parse values when importing the result of an hg export
273 if line.startswith("# User "):
273 if line.startswith("# User "):
274 user = line[7:]
274 user = line[7:]
275 elif line.startswith("# Date "):
275 elif line.startswith("# Date "):
276 date = line[7:]
276 date = line[7:]
277 elif not line.startswith("# ") and line:
277 elif not line.startswith("# ") and line:
278 message.append(line)
278 message.append(line)
279 format = None
279 format = None
280 elif line == '# HG changeset patch':
280 elif line == '# HG changeset patch':
281 format = "hgpatch"
281 format = "hgpatch"
282 elif (format != "tagdone" and (line.startswith("Subject: ") or
282 elif (format != "tagdone" and (line.startswith("Subject: ") or
283 line.startswith("subject: "))):
283 line.startswith("subject: "))):
284 subject = line[9:]
284 subject = line[9:]
285 format = "tag"
285 format = "tag"
286 elif (format != "tagdone" and (line.startswith("From: ") or
286 elif (format != "tagdone" and (line.startswith("From: ") or
287 line.startswith("from: "))):
287 line.startswith("from: "))):
288 user = line[6:]
288 user = line[6:]
289 format = "tag"
289 format = "tag"
290 elif format == "tag" and line == "":
290 elif format == "tag" and line == "":
291 # when looking for tags (subject: from: etc) they
291 # when looking for tags (subject: from: etc) they
292 # end once you find a blank line in the source
292 # end once you find a blank line in the source
293 format = "tagdone"
293 format = "tagdone"
294 elif message or line:
294 elif message or line:
295 message.append(line)
295 message.append(line)
296 comments.append(line)
296 comments.append(line)
297
297
298 eatdiff(message)
298 eatdiff(message)
299 eatdiff(comments)
299 eatdiff(comments)
300 eatempty(message)
300 eatempty(message)
301 eatempty(comments)
301 eatempty(comments)
302
302
303 # make sure message isn't empty
303 # make sure message isn't empty
304 if format and format.startswith("tag") and subject:
304 if format and format.startswith("tag") and subject:
305 message.insert(0, "")
305 message.insert(0, "")
306 message.insert(0, subject)
306 message.insert(0, subject)
307 return (message, comments, user, date, diffstart > 1)
307 return (message, comments, user, date, diffstart > 1)
308
308
309 def printdiff(self, repo, node1, node2=None, files=None,
309 def printdiff(self, repo, node1, node2=None, files=None,
310 fp=None, changes=None, opts={}):
310 fp=None, changes=None, opts={}):
311 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
311 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
312
312
313 patch.diff(repo, node1, node2, fns, match=matchfn,
313 patch.diff(repo, node1, node2, fns, match=matchfn,
314 fp=fp, changes=changes, opts=self.diffopts())
314 fp=fp, changes=changes, opts=self.diffopts())
315
315
316 def mergeone(self, repo, mergeq, head, patch, rev, wlock):
316 def mergeone(self, repo, mergeq, head, patch, rev, wlock):
317 # first try just applying the patch
317 # first try just applying the patch
318 (err, n) = self.apply(repo, [ patch ], update_status=False,
318 (err, n) = self.apply(repo, [ patch ], update_status=False,
319 strict=True, merge=rev, wlock=wlock)
319 strict=True, merge=rev, wlock=wlock)
320
320
321 if err == 0:
321 if err == 0:
322 return (err, n)
322 return (err, n)
323
323
324 if n is None:
324 if n is None:
325 raise util.Abort(_("apply failed for patch %s") % patch)
325 raise util.Abort(_("apply failed for patch %s") % patch)
326
326
327 self.ui.warn("patch didn't work out, merging %s\n" % patch)
327 self.ui.warn("patch didn't work out, merging %s\n" % patch)
328
328
329 # apply failed, strip away that rev and merge.
329 # apply failed, strip away that rev and merge.
330 hg.clean(repo, head, wlock=wlock)
330 hg.clean(repo, head, wlock=wlock)
331 self.strip(repo, n, update=False, backup='strip', wlock=wlock)
331 self.strip(repo, n, update=False, backup='strip', wlock=wlock)
332
332
333 ctx = repo.changectx(rev)
333 ctx = repo.changectx(rev)
334 ret = hg.merge(repo, rev, wlock=wlock)
334 ret = hg.merge(repo, rev, wlock=wlock)
335 if ret:
335 if ret:
336 raise util.Abort(_("update returned %d") % ret)
336 raise util.Abort(_("update returned %d") % ret)
337 n = repo.commit(None, ctx.description(), ctx.user(),
337 n = repo.commit(None, ctx.description(), ctx.user(),
338 force=1, wlock=wlock)
338 force=1, wlock=wlock)
339 if n == None:
339 if n == None:
340 raise util.Abort(_("repo commit failed"))
340 raise util.Abort(_("repo commit failed"))
341 try:
341 try:
342 message, comments, user, date, patchfound = mergeq.readheaders(patch)
342 message, comments, user, date, patchfound = mergeq.readheaders(patch)
343 except:
343 except:
344 raise util.Abort(_("unable to read %s") % patch)
344 raise util.Abort(_("unable to read %s") % patch)
345
345
346 patchf = self.opener(patch, "w")
346 patchf = self.opener(patch, "w")
347 if comments:
347 if comments:
348 comments = "\n".join(comments) + '\n\n'
348 comments = "\n".join(comments) + '\n\n'
349 patchf.write(comments)
349 patchf.write(comments)
350 self.printdiff(repo, head, n, fp=patchf)
350 self.printdiff(repo, head, n, fp=patchf)
351 patchf.close()
351 patchf.close()
352 return (0, n)
352 return (0, n)
353
353
354 def qparents(self, repo, rev=None):
354 def qparents(self, repo, rev=None):
355 if rev is None:
355 if rev is None:
356 (p1, p2) = repo.dirstate.parents()
356 (p1, p2) = repo.dirstate.parents()
357 if p2 == revlog.nullid:
357 if p2 == revlog.nullid:
358 return p1
358 return p1
359 if len(self.applied) == 0:
359 if len(self.applied) == 0:
360 return None
360 return None
361 return revlog.bin(self.applied[-1].rev)
361 return revlog.bin(self.applied[-1].rev)
362 pp = repo.changelog.parents(rev)
362 pp = repo.changelog.parents(rev)
363 if pp[1] != revlog.nullid:
363 if pp[1] != revlog.nullid:
364 arevs = [ x.rev for x in self.applied ]
364 arevs = [ x.rev for x in self.applied ]
365 p0 = revlog.hex(pp[0])
365 p0 = revlog.hex(pp[0])
366 p1 = revlog.hex(pp[1])
366 p1 = revlog.hex(pp[1])
367 if p0 in arevs:
367 if p0 in arevs:
368 return pp[0]
368 return pp[0]
369 if p1 in arevs:
369 if p1 in arevs:
370 return pp[1]
370 return pp[1]
371 return pp[0]
371 return pp[0]
372
372
373 def mergepatch(self, repo, mergeq, series, wlock):
373 def mergepatch(self, repo, mergeq, series, wlock):
374 if len(self.applied) == 0:
374 if len(self.applied) == 0:
375 # each of the patches merged in will have two parents. This
375 # each of the patches merged in will have two parents. This
376 # can confuse the qrefresh, qdiff, and strip code because it
376 # can confuse the qrefresh, qdiff, and strip code because it
377 # needs to know which parent is actually in the patch queue.
377 # needs to know which parent is actually in the patch queue.
378 # so, we insert a merge marker with only one parent. This way
378 # so, we insert a merge marker with only one parent. This way
379 # the first patch in the queue is never a merge patch
379 # the first patch in the queue is never a merge patch
380 #
380 #
381 pname = ".hg.patches.merge.marker"
381 pname = ".hg.patches.merge.marker"
382 n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
382 n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
383 wlock=wlock)
383 wlock=wlock)
384 self.applied.append(statusentry(revlog.hex(n), pname))
384 self.applied.append(statusentry(revlog.hex(n), pname))
385 self.applied_dirty = 1
385 self.applied_dirty = 1
386
386
387 head = self.qparents(repo)
387 head = self.qparents(repo)
388
388
389 for patch in series:
389 for patch in series:
390 patch = mergeq.lookup(patch, strict=True)
390 patch = mergeq.lookup(patch, strict=True)
391 if not patch:
391 if not patch:
392 self.ui.warn("patch %s does not exist\n" % patch)
392 self.ui.warn("patch %s does not exist\n" % patch)
393 return (1, None)
393 return (1, None)
394 pushable, reason = self.pushable(patch)
394 pushable, reason = self.pushable(patch)
395 if not pushable:
395 if not pushable:
396 self.explain_pushable(patch, all_patches=True)
396 self.explain_pushable(patch, all_patches=True)
397 continue
397 continue
398 info = mergeq.isapplied(patch)
398 info = mergeq.isapplied(patch)
399 if not info:
399 if not info:
400 self.ui.warn("patch %s is not applied\n" % patch)
400 self.ui.warn("patch %s is not applied\n" % patch)
401 return (1, None)
401 return (1, None)
402 rev = revlog.bin(info[1])
402 rev = revlog.bin(info[1])
403 (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
403 (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
404 if head:
404 if head:
405 self.applied.append(statusentry(revlog.hex(head), patch))
405 self.applied.append(statusentry(revlog.hex(head), patch))
406 self.applied_dirty = 1
406 self.applied_dirty = 1
407 if err:
407 if err:
408 return (err, head)
408 return (err, head)
409 return (0, head)
409 return (0, head)
410
410
411 def patch(self, repo, patchfile):
411 def patch(self, repo, patchfile):
412 '''Apply patchfile to the working directory.
412 '''Apply patchfile to the working directory.
413 patchfile: file name of patch'''
413 patchfile: file name of patch'''
414 files = {}
414 files = {}
415 try:
415 try:
416 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
416 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
417 files=files)
417 files=files)
418 except Exception, inst:
418 except Exception, inst:
419 self.ui.note(str(inst) + '\n')
419 self.ui.note(str(inst) + '\n')
420 if not self.ui.verbose:
420 if not self.ui.verbose:
421 self.ui.warn("patch failed, unable to continue (try -v)\n")
421 self.ui.warn("patch failed, unable to continue (try -v)\n")
422 return (False, files, False)
422 return (False, files, False)
423
423
424 return (True, files, fuzz)
424 return (True, files, fuzz)
425
425
426 def apply(self, repo, series, list=False, update_status=True,
426 def apply(self, repo, series, list=False, update_status=True,
427 strict=False, patchdir=None, merge=None, wlock=None):
427 strict=False, patchdir=None, merge=None, wlock=None):
428 # TODO unify with commands.py
428 # TODO unify with commands.py
429 if not patchdir:
429 if not patchdir:
430 patchdir = self.path
430 patchdir = self.path
431 err = 0
431 err = 0
432 if not wlock:
432 if not wlock:
433 wlock = repo.wlock()
433 wlock = repo.wlock()
434 lock = repo.lock()
434 lock = repo.lock()
435 tr = repo.transaction()
435 tr = repo.transaction()
436 n = None
436 n = None
437 for patchname in series:
437 for patchname in series:
438 pushable, reason = self.pushable(patchname)
438 pushable, reason = self.pushable(patchname)
439 if not pushable:
439 if not pushable:
440 self.explain_pushable(patchname, all_patches=True)
440 self.explain_pushable(patchname, all_patches=True)
441 continue
441 continue
442 self.ui.warn("applying %s\n" % patchname)
442 self.ui.warn("applying %s\n" % patchname)
443 pf = os.path.join(patchdir, patchname)
443 pf = os.path.join(patchdir, patchname)
444
444
445 try:
445 try:
446 message, comments, user, date, patchfound = self.readheaders(patchname)
446 message, comments, user, date, patchfound = self.readheaders(patchname)
447 except:
447 except:
448 self.ui.warn("Unable to read %s\n" % patchname)
448 self.ui.warn("Unable to read %s\n" % patchname)
449 err = 1
449 err = 1
450 break
450 break
451
451
452 if not message:
452 if not message:
453 message = "imported patch %s\n" % patchname
453 message = "imported patch %s\n" % patchname
454 else:
454 else:
455 if list:
455 if list:
456 message.append("\nimported patch %s" % patchname)
456 message.append("\nimported patch %s" % patchname)
457 message = '\n'.join(message)
457 message = '\n'.join(message)
458
458
459 (patcherr, files, fuzz) = self.patch(repo, pf)
459 (patcherr, files, fuzz) = self.patch(repo, pf)
460 patcherr = not patcherr
460 patcherr = not patcherr
461
461
462 if merge and files:
462 if merge and files:
463 # Mark as merged and update dirstate parent info
463 # Mark as merged and update dirstate parent info
464 repo.dirstate.update(repo.dirstate.filterfiles(files.keys()), 'm')
464 repo.dirstate.update(repo.dirstate.filterfiles(files.keys()), 'm')
465 p1, p2 = repo.dirstate.parents()
465 p1, p2 = repo.dirstate.parents()
466 repo.dirstate.setparents(p1, merge)
466 repo.dirstate.setparents(p1, merge)
467 files = patch.updatedir(self.ui, repo, files, wlock=wlock)
467 files = patch.updatedir(self.ui, repo, files, wlock=wlock)
468 n = repo.commit(files, message, user, date, force=1, lock=lock,
468 n = repo.commit(files, message, user, date, force=1, lock=lock,
469 wlock=wlock)
469 wlock=wlock)
470
470
471 if n == None:
471 if n == None:
472 raise util.Abort(_("repo commit failed"))
472 raise util.Abort(_("repo commit failed"))
473
473
474 if update_status:
474 if update_status:
475 self.applied.append(statusentry(revlog.hex(n), patchname))
475 self.applied.append(statusentry(revlog.hex(n), patchname))
476
476
477 if patcherr:
477 if patcherr:
478 if not patchfound:
478 if not patchfound:
479 self.ui.warn("patch %s is empty\n" % patchname)
479 self.ui.warn("patch %s is empty\n" % patchname)
480 err = 0
480 err = 0
481 else:
481 else:
482 self.ui.warn("patch failed, rejects left in working dir\n")
482 self.ui.warn("patch failed, rejects left in working dir\n")
483 err = 1
483 err = 1
484 break
484 break
485
485
486 if fuzz and strict:
486 if fuzz and strict:
487 self.ui.warn("fuzz found when applying patch, stopping\n")
487 self.ui.warn("fuzz found when applying patch, stopping\n")
488 err = 1
488 err = 1
489 break
489 break
490 tr.close()
490 tr.close()
491 return (err, n)
491 return (err, n)
492
492
493 def delete(self, repo, patches, opts):
493 def delete(self, repo, patches, opts):
494 realpatches = []
494 realpatches = []
495 for patch in patches:
495 for patch in patches:
496 patch = self.lookup(patch, strict=True)
496 patch = self.lookup(patch, strict=True)
497 info = self.isapplied(patch)
497 info = self.isapplied(patch)
498 if info:
498 if info:
499 raise util.Abort(_("cannot delete applied patch %s") % patch)
499 raise util.Abort(_("cannot delete applied patch %s") % patch)
500 if patch not in self.series:
500 if patch not in self.series:
501 raise util.Abort(_("patch %s not in series file") % patch)
501 raise util.Abort(_("patch %s not in series file") % patch)
502 realpatches.append(patch)
502 realpatches.append(patch)
503
503
504 appliedbase = 0
504 appliedbase = 0
505 if opts.get('rev'):
505 if opts.get('rev'):
506 if not self.applied:
506 if not self.applied:
507 raise util.Abort(_('no patches applied'))
507 raise util.Abort(_('no patches applied'))
508 revs = cmdutil.revrange(repo, opts['rev'])
508 revs = cmdutil.revrange(repo, opts['rev'])
509 if len(revs) > 1 and revs[0] > revs[1]:
509 if len(revs) > 1 and revs[0] > revs[1]:
510 revs.reverse()
510 revs.reverse()
511 for rev in revs:
511 for rev in revs:
512 if appliedbase >= len(self.applied):
512 if appliedbase >= len(self.applied):
513 raise util.Abort(_("revision %d is not managed") % rev)
513 raise util.Abort(_("revision %d is not managed") % rev)
514
514
515 base = revlog.bin(self.applied[appliedbase].rev)
515 base = revlog.bin(self.applied[appliedbase].rev)
516 node = repo.changelog.node(rev)
516 node = repo.changelog.node(rev)
517 if node != base:
517 if node != base:
518 raise util.Abort(_("cannot delete revision %d above "
518 raise util.Abort(_("cannot delete revision %d above "
519 "applied patches") % rev)
519 "applied patches") % rev)
520 realpatches.append(self.applied[appliedbase].name)
520 realpatches.append(self.applied[appliedbase].name)
521 appliedbase += 1
521 appliedbase += 1
522
522
523 if not opts.get('keep'):
523 if not opts.get('keep'):
524 r = self.qrepo()
524 r = self.qrepo()
525 if r:
525 if r:
526 r.remove(realpatches, True)
526 r.remove(realpatches, True)
527 else:
527 else:
528 for p in realpatches:
528 for p in realpatches:
529 os.unlink(self.join(p))
529 os.unlink(self.join(p))
530
530
531 if appliedbase:
531 if appliedbase:
532 del self.applied[:appliedbase]
532 del self.applied[:appliedbase]
533 self.applied_dirty = 1
533 self.applied_dirty = 1
534 indices = [self.find_series(p) for p in realpatches]
534 indices = [self.find_series(p) for p in realpatches]
535 indices.sort()
535 indices.sort()
536 for i in indices[-1::-1]:
536 for i in indices[-1::-1]:
537 del self.full_series[i]
537 del self.full_series[i]
538 self.parse_series()
538 self.parse_series()
539 self.series_dirty = 1
539 self.series_dirty = 1
540
540
541 def check_toppatch(self, repo):
541 def check_toppatch(self, repo):
542 if len(self.applied) > 0:
542 if len(self.applied) > 0:
543 top = revlog.bin(self.applied[-1].rev)
543 top = revlog.bin(self.applied[-1].rev)
544 pp = repo.dirstate.parents()
544 pp = repo.dirstate.parents()
545 if top not in pp:
545 if top not in pp:
546 raise util.Abort(_("queue top not at same revision as working directory"))
546 raise util.Abort(_("queue top not at same revision as working directory"))
547 return top
547 return top
548 return None
548 return None
549 def check_localchanges(self, repo, force=False, refresh=True):
549 def check_localchanges(self, repo, force=False, refresh=True):
550 m, a, r, d = repo.status()[:4]
550 m, a, r, d = repo.status()[:4]
551 if m or a or r or d:
551 if m or a or r or d:
552 if not force:
552 if not force:
553 if refresh:
553 if refresh:
554 raise util.Abort(_("local changes found, refresh first"))
554 raise util.Abort(_("local changes found, refresh first"))
555 else:
555 else:
556 raise util.Abort(_("local changes found"))
556 raise util.Abort(_("local changes found"))
557 return m, a, r, d
557 return m, a, r, d
558 def new(self, repo, patch, msg=None, force=None):
558 def new(self, repo, patch, msg=None, force=None):
559 if os.path.exists(self.join(patch)):
559 if os.path.exists(self.join(patch)):
560 raise util.Abort(_('patch "%s" already exists') % patch)
560 raise util.Abort(_('patch "%s" already exists') % patch)
561 m, a, r, d = self.check_localchanges(repo, force)
561 m, a, r, d = self.check_localchanges(repo, force)
562 commitfiles = m + a + r
562 commitfiles = m + a + r
563 self.check_toppatch(repo)
563 self.check_toppatch(repo)
564 wlock = repo.wlock()
564 wlock = repo.wlock()
565 insert = self.full_series_end()
565 insert = self.full_series_end()
566 if msg:
566 if msg:
567 n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
567 n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
568 wlock=wlock)
568 wlock=wlock)
569 else:
569 else:
570 n = repo.commit(commitfiles,
570 n = repo.commit(commitfiles,
571 "New patch: %s" % patch, force=True, wlock=wlock)
571 "New patch: %s" % patch, force=True, wlock=wlock)
572 if n == None:
572 if n == None:
573 raise util.Abort(_("repo commit failed"))
573 raise util.Abort(_("repo commit failed"))
574 self.full_series[insert:insert] = [patch]
574 self.full_series[insert:insert] = [patch]
575 self.applied.append(statusentry(revlog.hex(n), patch))
575 self.applied.append(statusentry(revlog.hex(n), patch))
576 self.parse_series()
576 self.parse_series()
577 self.series_dirty = 1
577 self.series_dirty = 1
578 self.applied_dirty = 1
578 self.applied_dirty = 1
579 p = self.opener(patch, "w")
579 p = self.opener(patch, "w")
580 if msg:
580 if msg:
581 msg = msg + "\n"
581 msg = msg + "\n"
582 p.write(msg)
582 p.write(msg)
583 p.close()
583 p.close()
584 wlock = None
584 wlock = None
585 r = self.qrepo()
585 r = self.qrepo()
586 if r: r.add([patch])
586 if r: r.add([patch])
587 if commitfiles:
587 if commitfiles:
588 self.refresh(repo, short=True)
588 self.refresh(repo, short=True)
589
589
590 def strip(self, repo, rev, update=True, backup="all", wlock=None):
590 def strip(self, repo, rev, update=True, backup="all", wlock=None):
591 def limitheads(chlog, stop):
591 def limitheads(chlog, stop):
592 """return the list of all nodes that have no children"""
592 """return the list of all nodes that have no children"""
593 p = {}
593 p = {}
594 h = []
594 h = []
595 stoprev = 0
595 stoprev = 0
596 if stop in chlog.nodemap:
596 if stop in chlog.nodemap:
597 stoprev = chlog.rev(stop)
597 stoprev = chlog.rev(stop)
598
598
599 for r in xrange(chlog.count() - 1, -1, -1):
599 for r in xrange(chlog.count() - 1, -1, -1):
600 n = chlog.node(r)
600 n = chlog.node(r)
601 if n not in p:
601 if n not in p:
602 h.append(n)
602 h.append(n)
603 if n == stop:
603 if n == stop:
604 break
604 break
605 if r < stoprev:
605 if r < stoprev:
606 break
606 break
607 for pn in chlog.parents(n):
607 for pn in chlog.parents(n):
608 p[pn] = 1
608 p[pn] = 1
609 return h
609 return h
610
610
611 def bundle(cg):
611 def bundle(cg):
612 backupdir = repo.join("strip-backup")
612 backupdir = repo.join("strip-backup")
613 if not os.path.isdir(backupdir):
613 if not os.path.isdir(backupdir):
614 os.mkdir(backupdir)
614 os.mkdir(backupdir)
615 name = os.path.join(backupdir, "%s" % revlog.short(rev))
615 name = os.path.join(backupdir, "%s" % revlog.short(rev))
616 name = savename(name)
616 name = savename(name)
617 self.ui.warn("saving bundle to %s\n" % name)
617 self.ui.warn("saving bundle to %s\n" % name)
618 return changegroup.writebundle(cg, name, "HG10BZ")
618 return changegroup.writebundle(cg, name, "HG10BZ")
619
619
620 def stripall(revnum):
620 def stripall(revnum):
621 mm = repo.changectx(rev).manifest()
621 mm = repo.changectx(rev).manifest()
622 seen = {}
622 seen = {}
623
623
624 for x in xrange(revnum, repo.changelog.count()):
624 for x in xrange(revnum, repo.changelog.count()):
625 for f in repo.changectx(x).files():
625 for f in repo.changectx(x).files():
626 if f in seen:
626 if f in seen:
627 continue
627 continue
628 seen[f] = 1
628 seen[f] = 1
629 if f in mm:
629 if f in mm:
630 filerev = mm[f]
630 filerev = mm[f]
631 else:
631 else:
632 filerev = 0
632 filerev = 0
633 seen[f] = filerev
633 seen[f] = filerev
634 # we go in two steps here so the strip loop happens in a
634 # we go in two steps here so the strip loop happens in a
635 # sensible order. When stripping many files, this helps keep
635 # sensible order. When stripping many files, this helps keep
636 # our disk access patterns under control.
636 # our disk access patterns under control.
637 seen_list = seen.keys()
637 seen_list = seen.keys()
638 seen_list.sort()
638 seen_list.sort()
639 for f in seen_list:
639 for f in seen_list:
640 ff = repo.file(f)
640 ff = repo.file(f)
641 filerev = seen[f]
641 filerev = seen[f]
642 if filerev != 0:
642 if filerev != 0:
643 if filerev in ff.nodemap:
643 if filerev in ff.nodemap:
644 filerev = ff.rev(filerev)
644 filerev = ff.rev(filerev)
645 else:
645 else:
646 filerev = 0
646 filerev = 0
647 ff.strip(filerev, revnum)
647 ff.strip(filerev, revnum)
648
648
649 if not wlock:
649 if not wlock:
650 wlock = repo.wlock()
650 wlock = repo.wlock()
651 lock = repo.lock()
651 lock = repo.lock()
652 chlog = repo.changelog
652 chlog = repo.changelog
653 # TODO delete the undo files, and handle undo of merge sets
653 # TODO delete the undo files, and handle undo of merge sets
654 pp = chlog.parents(rev)
654 pp = chlog.parents(rev)
655 revnum = chlog.rev(rev)
655 revnum = chlog.rev(rev)
656
656
657 if update:
657 if update:
658 self.check_localchanges(repo, refresh=False)
658 self.check_localchanges(repo, refresh=False)
659 urev = self.qparents(repo, rev)
659 urev = self.qparents(repo, rev)
660 hg.clean(repo, urev, wlock=wlock)
660 hg.clean(repo, urev, wlock=wlock)
661 repo.dirstate.write()
661 repo.dirstate.write()
662
662
663 # save is a list of all the branches we are truncating away
663 # save is a list of all the branches we are truncating away
664 # that we actually want to keep. changegroup will be used
664 # that we actually want to keep. changegroup will be used
665 # to preserve them and add them back after the truncate
665 # to preserve them and add them back after the truncate
666 saveheads = []
666 saveheads = []
667 savebases = {}
667 savebases = {}
668
668
669 heads = limitheads(chlog, rev)
669 heads = limitheads(chlog, rev)
670 seen = {}
670 seen = {}
671
671
672 # search through all the heads, finding those where the revision
672 # search through all the heads, finding those where the revision
673 # we want to strip away is an ancestor. Also look for merges
673 # we want to strip away is an ancestor. Also look for merges
674 # that might be turned into new heads by the strip.
674 # that might be turned into new heads by the strip.
675 while heads:
675 while heads:
676 h = heads.pop()
676 h = heads.pop()
677 n = h
677 n = h
678 while True:
678 while True:
679 seen[n] = 1
679 seen[n] = 1
680 pp = chlog.parents(n)
680 pp = chlog.parents(n)
681 if pp[1] != revlog.nullid:
681 if pp[1] != revlog.nullid:
682 for p in pp:
682 for p in pp:
683 if chlog.rev(p) > revnum and p not in seen:
683 if chlog.rev(p) > revnum and p not in seen:
684 heads.append(p)
684 heads.append(p)
685 if pp[0] == revlog.nullid:
685 if pp[0] == revlog.nullid:
686 break
686 break
687 if chlog.rev(pp[0]) < revnum:
687 if chlog.rev(pp[0]) < revnum:
688 break
688 break
689 n = pp[0]
689 n = pp[0]
690 if n == rev:
690 if n == rev:
691 break
691 break
692 r = chlog.reachable(h, rev)
692 r = chlog.reachable(h, rev)
693 if rev not in r:
693 if rev not in r:
694 saveheads.append(h)
694 saveheads.append(h)
695 for x in r:
695 for x in r:
696 if chlog.rev(x) > revnum:
696 if chlog.rev(x) > revnum:
697 savebases[x] = 1
697 savebases[x] = 1
698
698
699 # create a changegroup for all the branches we need to keep
699 # create a changegroup for all the branches we need to keep
700 if backup == "all":
700 if backup == "all":
701 backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
701 backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
702 bundle(backupch)
702 bundle(backupch)
703 if saveheads:
703 if saveheads:
704 backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
704 backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
705 chgrpfile = bundle(backupch)
705 chgrpfile = bundle(backupch)
706
706
707 stripall(revnum)
707 stripall(revnum)
708
708
709 change = chlog.read(rev)
709 change = chlog.read(rev)
710 chlog.strip(revnum, revnum)
710 chlog.strip(revnum, revnum)
711 repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
711 repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
712 if saveheads:
712 if saveheads:
713 self.ui.status("adding branch\n")
713 self.ui.status("adding branch\n")
714 commands.unbundle(self.ui, repo, "file:%s" % chgrpfile,
714 commands.unbundle(self.ui, repo, "file:%s" % chgrpfile,
715 update=False)
715 update=False)
716 if backup != "strip":
716 if backup != "strip":
717 os.unlink(chgrpfile)
717 os.unlink(chgrpfile)
718
718
719 def isapplied(self, patch):
719 def isapplied(self, patch):
720 """returns (index, rev, patch)"""
720 """returns (index, rev, patch)"""
721 for i in xrange(len(self.applied)):
721 for i in xrange(len(self.applied)):
722 a = self.applied[i]
722 a = self.applied[i]
723 if a.name == patch:
723 if a.name == patch:
724 return (i, a.rev, a.name)
724 return (i, a.rev, a.name)
725 return None
725 return None
726
726
727 # if the exact patch name does not exist, we try a few
727 # if the exact patch name does not exist, we try a few
728 # variations. If strict is passed, we try only #1
728 # variations. If strict is passed, we try only #1
729 #
729 #
730 # 1) a number to indicate an offset in the series file
730 # 1) a number to indicate an offset in the series file
731 # 2) a unique substring of the patch name was given
731 # 2) a unique substring of the patch name was given
732 # 3) patchname[-+]num to indicate an offset in the series file
732 # 3) patchname[-+]num to indicate an offset in the series file
733 def lookup(self, patch, strict=False):
733 def lookup(self, patch, strict=False):
734 patch = patch and str(patch)
734 patch = patch and str(patch)
735
735
736 def partial_name(s):
736 def partial_name(s):
737 if s in self.series:
737 if s in self.series:
738 return s
738 return s
739 matches = [x for x in self.series if s in x]
739 matches = [x for x in self.series if s in x]
740 if len(matches) > 1:
740 if len(matches) > 1:
741 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
741 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
742 for m in matches:
742 for m in matches:
743 self.ui.warn(' %s\n' % m)
743 self.ui.warn(' %s\n' % m)
744 return None
744 return None
745 if matches:
745 if matches:
746 return matches[0]
746 return matches[0]
747 if len(self.series) > 0 and len(self.applied) > 0:
747 if len(self.series) > 0 and len(self.applied) > 0:
748 if s == 'qtip':
748 if s == 'qtip':
749 return self.series[self.series_end(True)-1]
749 return self.series[self.series_end(True)-1]
750 if s == 'qbase':
750 if s == 'qbase':
751 return self.series[0]
751 return self.series[0]
752 return None
752 return None
753 if patch == None:
753 if patch == None:
754 return None
754 return None
755
755
756 # we don't want to return a partial match until we make
756 # we don't want to return a partial match until we make
757 # sure the file name passed in does not exist (checked below)
757 # sure the file name passed in does not exist (checked below)
758 res = partial_name(patch)
758 res = partial_name(patch)
759 if res and res == patch:
759 if res and res == patch:
760 return res
760 return res
761
761
762 if not os.path.isfile(self.join(patch)):
762 if not os.path.isfile(self.join(patch)):
763 try:
763 try:
764 sno = int(patch)
764 sno = int(patch)
765 except(ValueError, OverflowError):
765 except(ValueError, OverflowError):
766 pass
766 pass
767 else:
767 else:
768 if sno < len(self.series):
768 if sno < len(self.series):
769 return self.series[sno]
769 return self.series[sno]
770 if not strict:
770 if not strict:
771 # return any partial match made above
771 # return any partial match made above
772 if res:
772 if res:
773 return res
773 return res
774 minus = patch.rfind('-')
774 minus = patch.rfind('-')
775 if minus >= 0:
775 if minus >= 0:
776 res = partial_name(patch[:minus])
776 res = partial_name(patch[:minus])
777 if res:
777 if res:
778 i = self.series.index(res)
778 i = self.series.index(res)
779 try:
779 try:
780 off = int(patch[minus+1:] or 1)
780 off = int(patch[minus+1:] or 1)
781 except(ValueError, OverflowError):
781 except(ValueError, OverflowError):
782 pass
782 pass
783 else:
783 else:
784 if i - off >= 0:
784 if i - off >= 0:
785 return self.series[i - off]
785 return self.series[i - off]
786 plus = patch.rfind('+')
786 plus = patch.rfind('+')
787 if plus >= 0:
787 if plus >= 0:
788 res = partial_name(patch[:plus])
788 res = partial_name(patch[:plus])
789 if res:
789 if res:
790 i = self.series.index(res)
790 i = self.series.index(res)
791 try:
791 try:
792 off = int(patch[plus+1:] or 1)
792 off = int(patch[plus+1:] or 1)
793 except(ValueError, OverflowError):
793 except(ValueError, OverflowError):
794 pass
794 pass
795 else:
795 else:
796 if i + off < len(self.series):
796 if i + off < len(self.series):
797 return self.series[i + off]
797 return self.series[i + off]
798 raise util.Abort(_("patch %s not in series") % patch)
798 raise util.Abort(_("patch %s not in series") % patch)
799
799
800 def push(self, repo, patch=None, force=False, list=False,
800 def push(self, repo, patch=None, force=False, list=False,
801 mergeq=None, wlock=None):
801 mergeq=None, wlock=None):
802 if not wlock:
802 if not wlock:
803 wlock = repo.wlock()
803 wlock = repo.wlock()
804 patch = self.lookup(patch)
804 patch = self.lookup(patch)
805 # Suppose our series file is: A B C and the current 'top' patch is B.
805 # Suppose our series file is: A B C and the current 'top' patch is B.
806 # qpush C should be performed (moving forward)
806 # qpush C should be performed (moving forward)
807 # qpush B is a NOP (no change)
807 # qpush B is a NOP (no change)
808 # qpush A is an error (can't go backwards with qpush)
808 # qpush A is an error (can't go backwards with qpush)
809 if patch:
809 if patch:
810 info = self.isapplied(patch)
810 info = self.isapplied(patch)
811 if info:
811 if info:
812 if info[0] < len(self.applied) - 1:
812 if info[0] < len(self.applied) - 1:
813 raise util.Abort(_("cannot push to a previous patch: %s") %
813 raise util.Abort(_("cannot push to a previous patch: %s") %
814 patch)
814 patch)
815 if info[0] < len(self.series) - 1:
815 if info[0] < len(self.series) - 1:
816 self.ui.warn(_('qpush: %s is already at the top\n') % patch)
816 self.ui.warn(_('qpush: %s is already at the top\n') % patch)
817 else:
817 else:
818 self.ui.warn(_('all patches are currently applied\n'))
818 self.ui.warn(_('all patches are currently applied\n'))
819 return
819 return
820
820
821 # Following the above example, starting at 'top' of B:
821 # Following the above example, starting at 'top' of B:
822 # qpush should be performed (pushes C), but a subsequent qpush without
822 # qpush should be performed (pushes C), but a subsequent qpush without
823 # an argument is an error (nothing to apply). This allows a loop
823 # an argument is an error (nothing to apply). This allows a loop
824 # of "...while hg qpush..." to work as it detects an error when done
824 # of "...while hg qpush..." to work as it detects an error when done
825 if self.series_end() == len(self.series):
825 if self.series_end() == len(self.series):
826 self.ui.warn(_('patch series already fully applied\n'))
826 self.ui.warn(_('patch series already fully applied\n'))
827 return 1
827 return 1
828 if not force:
828 if not force:
829 self.check_localchanges(repo)
829 self.check_localchanges(repo)
830
830
831 self.applied_dirty = 1;
831 self.applied_dirty = 1;
832 start = self.series_end()
832 start = self.series_end()
833 if start > 0:
833 if start > 0:
834 self.check_toppatch(repo)
834 self.check_toppatch(repo)
835 if not patch:
835 if not patch:
836 patch = self.series[start]
836 patch = self.series[start]
837 end = start + 1
837 end = start + 1
838 else:
838 else:
839 end = self.series.index(patch, start) + 1
839 end = self.series.index(patch, start) + 1
840 s = self.series[start:end]
840 s = self.series[start:end]
841 if mergeq:
841 if mergeq:
842 ret = self.mergepatch(repo, mergeq, s, wlock)
842 ret = self.mergepatch(repo, mergeq, s, wlock)
843 else:
843 else:
844 ret = self.apply(repo, s, list, wlock=wlock)
844 ret = self.apply(repo, s, list, wlock=wlock)
845 top = self.applied[-1].name
845 top = self.applied[-1].name
846 if ret[0]:
846 if ret[0]:
847 self.ui.write("Errors during apply, please fix and refresh %s\n" %
847 self.ui.write("Errors during apply, please fix and refresh %s\n" %
848 top)
848 top)
849 else:
849 else:
850 self.ui.write("Now at: %s\n" % top)
850 self.ui.write("Now at: %s\n" % top)
851 return ret[0]
851 return ret[0]
852
852
853 def pop(self, repo, patch=None, force=False, update=True, all=False,
853 def pop(self, repo, patch=None, force=False, update=True, all=False,
854 wlock=None):
854 wlock=None):
855 def getfile(f, rev):
855 def getfile(f, rev):
856 t = repo.file(f).read(rev)
856 t = repo.file(f).read(rev)
857 repo.wfile(f, "w").write(t)
857 repo.wfile(f, "w").write(t)
858
858
859 if not wlock:
859 if not wlock:
860 wlock = repo.wlock()
860 wlock = repo.wlock()
861 if patch:
861 if patch:
862 # index, rev, patch
862 # index, rev, patch
863 info = self.isapplied(patch)
863 info = self.isapplied(patch)
864 if not info:
864 if not info:
865 patch = self.lookup(patch)
865 patch = self.lookup(patch)
866 info = self.isapplied(patch)
866 info = self.isapplied(patch)
867 if not info:
867 if not info:
868 raise util.Abort(_("patch %s is not applied") % patch)
868 raise util.Abort(_("patch %s is not applied") % patch)
869
869
870 if len(self.applied) == 0:
870 if len(self.applied) == 0:
871 # Allow qpop -a to work repeatedly,
871 # Allow qpop -a to work repeatedly,
872 # but not qpop without an argument
872 # but not qpop without an argument
873 self.ui.warn(_("no patches applied\n"))
873 self.ui.warn(_("no patches applied\n"))
874 return not all
874 return not all
875
875
876 if not update:
876 if not update:
877 parents = repo.dirstate.parents()
877 parents = repo.dirstate.parents()
878 rr = [ revlog.bin(x.rev) for x in self.applied ]
878 rr = [ revlog.bin(x.rev) for x in self.applied ]
879 for p in parents:
879 for p in parents:
880 if p in rr:
880 if p in rr:
881 self.ui.warn("qpop: forcing dirstate update\n")
881 self.ui.warn("qpop: forcing dirstate update\n")
882 update = True
882 update = True
883
883
884 if not force and update:
884 if not force and update:
885 self.check_localchanges(repo)
885 self.check_localchanges(repo)
886
886
887 self.applied_dirty = 1;
887 self.applied_dirty = 1;
888 end = len(self.applied)
888 end = len(self.applied)
889 if not patch:
889 if not patch:
890 if all:
890 if all:
891 popi = 0
891 popi = 0
892 else:
892 else:
893 popi = len(self.applied) - 1
893 popi = len(self.applied) - 1
894 else:
894 else:
895 popi = info[0] + 1
895 popi = info[0] + 1
896 if popi >= end:
896 if popi >= end:
897 self.ui.warn("qpop: %s is already at the top\n" % patch)
897 self.ui.warn("qpop: %s is already at the top\n" % patch)
898 return
898 return
899 info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
899 info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]
900
900
901 start = info[0]
901 start = info[0]
902 rev = revlog.bin(info[1])
902 rev = revlog.bin(info[1])
903
903
904 # we know there are no local changes, so we can make a simplified
904 # we know there are no local changes, so we can make a simplified
905 # form of hg.update.
905 # form of hg.update.
906 if update:
906 if update:
907 top = self.check_toppatch(repo)
907 top = self.check_toppatch(repo)
908 qp = self.qparents(repo, rev)
908 qp = self.qparents(repo, rev)
909 changes = repo.changelog.read(qp)
909 changes = repo.changelog.read(qp)
910 mmap = repo.manifest.read(changes[0])
910 mmap = repo.manifest.read(changes[0])
911 m, a, r, d, u = repo.status(qp, top)[:5]
911 m, a, r, d, u = repo.status(qp, top)[:5]
912 if d:
912 if d:
913 raise util.Abort("deletions found between repo revs")
913 raise util.Abort("deletions found between repo revs")
914 for f in m:
914 for f in m:
915 getfile(f, mmap[f])
915 getfile(f, mmap[f])
916 for f in r:
916 for f in r:
917 getfile(f, mmap[f])
917 getfile(f, mmap[f])
918 util.set_exec(repo.wjoin(f), mmap.execf(f))
918 util.set_exec(repo.wjoin(f), mmap.execf(f))
919 repo.dirstate.update(m + r, 'n')
919 repo.dirstate.update(m + r, 'n')
920 for f in a:
920 for f in a:
921 try:
921 try:
922 os.unlink(repo.wjoin(f))
922 os.unlink(repo.wjoin(f))
923 except OSError, e:
923 except OSError, e:
924 if e.errno != errno.ENOENT:
924 if e.errno != errno.ENOENT:
925 raise
925 raise
926 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
926 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
927 except: pass
927 except: pass
928 if a:
928 if a:
929 repo.dirstate.forget(a)
929 repo.dirstate.forget(a)
930 repo.dirstate.setparents(qp, revlog.nullid)
930 repo.dirstate.setparents(qp, revlog.nullid)
931 self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
931 self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
932 del self.applied[start:end]
932 del self.applied[start:end]
933 if len(self.applied):
933 if len(self.applied):
934 self.ui.write("Now at: %s\n" % self.applied[-1].name)
934 self.ui.write("Now at: %s\n" % self.applied[-1].name)
935 else:
935 else:
936 self.ui.write("Patch queue now empty\n")
936 self.ui.write("Patch queue now empty\n")
937
937
938 def diff(self, repo, pats, opts):
938 def diff(self, repo, pats, opts):
939 top = self.check_toppatch(repo)
939 top = self.check_toppatch(repo)
940 if not top:
940 if not top:
941 self.ui.write("No patches applied\n")
941 self.ui.write("No patches applied\n")
942 return
942 return
943 qp = self.qparents(repo, top)
943 qp = self.qparents(repo, top)
944 if opts.get('git'):
944 if opts.get('git'):
945 self.diffopts().git = True
945 self.diffopts().git = True
946 self.printdiff(repo, qp, files=pats, opts=opts)
946 self.printdiff(repo, qp, files=pats, opts=opts)
947
947
948 def refresh(self, repo, pats=None, **opts):
948 def refresh(self, repo, pats=None, **opts):
949 if len(self.applied) == 0:
949 if len(self.applied) == 0:
950 self.ui.write("No patches applied\n")
950 self.ui.write("No patches applied\n")
951 return 1
951 return 1
952 wlock = repo.wlock()
952 wlock = repo.wlock()
953 self.check_toppatch(repo)
953 self.check_toppatch(repo)
954 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
954 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
955 top = revlog.bin(top)
955 top = revlog.bin(top)
956 cparents = repo.changelog.parents(top)
956 cparents = repo.changelog.parents(top)
957 patchparent = self.qparents(repo, top)
957 patchparent = self.qparents(repo, top)
958 message, comments, user, date, patchfound = self.readheaders(patchfn)
958 message, comments, user, date, patchfound = self.readheaders(patchfn)
959
959
960 patchf = self.opener(patchfn, "w")
960 patchf = self.opener(patchfn, "w")
961 msg = opts.get('msg', '').rstrip()
961 msg = opts.get('msg', '').rstrip()
962 if msg:
962 if msg:
963 if comments:
963 if comments:
964 # Remove existing message.
964 # Remove existing message.
965 ci = 0
965 ci = 0
966 for mi in xrange(len(message)):
966 for mi in xrange(len(message)):
967 while message[mi] != comments[ci]:
967 while message[mi] != comments[ci]:
968 ci += 1
968 ci += 1
969 del comments[ci]
969 del comments[ci]
970 comments.append(msg)
970 comments.append(msg)
971 if comments:
971 if comments:
972 comments = "\n".join(comments) + '\n\n'
972 comments = "\n".join(comments) + '\n\n'
973 patchf.write(comments)
973 patchf.write(comments)
974
974
975 if opts.get('git'):
975 if opts.get('git'):
976 self.diffopts().git = True
976 self.diffopts().git = True
977 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
977 fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
978 tip = repo.changelog.tip()
978 tip = repo.changelog.tip()
979 if top == tip:
979 if top == tip:
980 # if the top of our patch queue is also the tip, there is an
980 # if the top of our patch queue is also the tip, there is an
981 # optimization here. We update the dirstate in place and strip
981 # optimization here. We update the dirstate in place and strip
982 # off the tip commit. Then just commit the current directory
982 # off the tip commit. Then just commit the current directory
983 # tree. We can also send repo.commit the list of files
983 # tree. We can also send repo.commit the list of files
984 # changed to speed up the diff
984 # changed to speed up the diff
985 #
985 #
986 # in short mode, we only diff the files included in the
986 # in short mode, we only diff the files included in the
987 # patch already
987 # patch already
988 #
988 #
989 # this should really read:
989 # this should really read:
990 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
990 # mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
991 # but we do it backwards to take advantage of manifest/chlog
991 # but we do it backwards to take advantage of manifest/chlog
992 # caching against the next repo.status call
992 # caching against the next repo.status call
993 #
993 #
994 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
994 mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
995 changes = repo.changelog.read(tip)
995 changes = repo.changelog.read(tip)
996 man = repo.manifest.read(changes[0])
996 man = repo.manifest.read(changes[0])
997 aaa = aa[:]
997 aaa = aa[:]
998 if opts.get('short'):
998 if opts.get('short'):
999 filelist = mm + aa + dd
999 filelist = mm + aa + dd
1000 else:
1000 else:
1001 filelist = None
1001 filelist = None
1002 m, a, r, d, u = repo.status(files=filelist)[:5]
1002 m, a, r, d, u = repo.status(files=filelist)[:5]
1003
1003
1004 # we might end up with files that were added between tip and
1004 # we might end up with files that were added between tip and
1005 # the dirstate parent, but then changed in the local dirstate.
1005 # the dirstate parent, but then changed in the local dirstate.
1006 # in this case, we want them to only show up in the added section
1006 # in this case, we want them to only show up in the added section
1007 for x in m:
1007 for x in m:
1008 if x not in aa:
1008 if x not in aa:
1009 mm.append(x)
1009 mm.append(x)
1010 # we might end up with files added by the local dirstate that
1010 # we might end up with files added by the local dirstate that
1011 # were deleted by the patch. In this case, they should only
1011 # were deleted by the patch. In this case, they should only
1012 # show up in the changed section.
1012 # show up in the changed section.
1013 for x in a:
1013 for x in a:
1014 if x in dd:
1014 if x in dd:
1015 del dd[dd.index(x)]
1015 del dd[dd.index(x)]
1016 mm.append(x)
1016 mm.append(x)
1017 else:
1017 else:
1018 aa.append(x)
1018 aa.append(x)
1019 # make sure any files deleted in the local dirstate
1019 # make sure any files deleted in the local dirstate
1020 # are not in the add or change column of the patch
1020 # are not in the add or change column of the patch
1021 forget = []
1021 forget = []
1022 for x in d + r:
1022 for x in d + r:
1023 if x in aa:
1023 if x in aa:
1024 del aa[aa.index(x)]
1024 del aa[aa.index(x)]
1025 forget.append(x)
1025 forget.append(x)
1026 continue
1026 continue
1027 elif x in mm:
1027 elif x in mm:
1028 del mm[mm.index(x)]
1028 del mm[mm.index(x)]
1029 dd.append(x)
1029 dd.append(x)
1030
1030
1031 m = util.unique(mm)
1031 m = util.unique(mm)
1032 r = util.unique(dd)
1032 r = util.unique(dd)
1033 a = util.unique(aa)
1033 a = util.unique(aa)
1034 filelist = filter(matchfn, util.unique(m + r + a))
1034 filelist = filter(matchfn, util.unique(m + r + a))
1035 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1035 patch.diff(repo, patchparent, files=filelist, match=matchfn,
1036 fp=patchf, changes=(m, a, r, [], u),
1036 fp=patchf, changes=(m, a, r, [], u),
1037 opts=self.diffopts())
1037 opts=self.diffopts())
1038 patchf.close()
1038 patchf.close()
1039
1039
1040 repo.dirstate.setparents(*cparents)
1040 repo.dirstate.setparents(*cparents)
1041 copies = {}
1041 copies = {}
1042 for dst in a:
1042 for dst in a:
1043 src = repo.dirstate.copied(dst)
1043 src = repo.dirstate.copied(dst)
1044 if src is None:
1044 if src is None:
1045 continue
1045 continue
1046 copies.setdefault(src, []).append(dst)
1046 copies.setdefault(src, []).append(dst)
1047 repo.dirstate.update(a, 'a')
1047 repo.dirstate.update(a, 'a')
1048 # remember the copies between patchparent and tip
1048 # remember the copies between patchparent and tip
1049 # this may be slow, so don't do it if we're not tracking copies
1049 # this may be slow, so don't do it if we're not tracking copies
1050 if self.diffopts().git:
1050 if self.diffopts().git:
1051 for dst in aaa:
1051 for dst in aaa:
1052 f = repo.file(dst)
1052 f = repo.file(dst)
1053 src = f.renamed(man[dst])
1053 src = f.renamed(man[dst])
1054 if src:
1054 if src:
1055 copies[src[0]] = copies.get(dst, [])
1055 copies[src[0]] = copies.get(dst, [])
1056 if dst in a:
1056 if dst in a:
1057 copies[src[0]].append(dst)
1057 copies[src[0]].append(dst)
1058 # we can't copy a file created by the patch itself
1058 # we can't copy a file created by the patch itself
1059 if dst in copies:
1059 if dst in copies:
1060 del copies[dst]
1060 del copies[dst]
1061 for src, dsts in copies.iteritems():
1061 for src, dsts in copies.iteritems():
1062 for dst in dsts:
1062 for dst in dsts:
1063 repo.dirstate.copy(src, dst)
1063 repo.dirstate.copy(src, dst)
1064 repo.dirstate.update(r, 'r')
1064 repo.dirstate.update(r, 'r')
1065 # if the patch excludes a modified file, mark that file with mtime=0
1065 # if the patch excludes a modified file, mark that file with mtime=0
1066 # so status can see it.
1066 # so status can see it.
1067 mm = []
1067 mm = []
1068 for i in xrange(len(m)-1, -1, -1):
1068 for i in xrange(len(m)-1, -1, -1):
1069 if not matchfn(m[i]):
1069 if not matchfn(m[i]):
1070 mm.append(m[i])
1070 mm.append(m[i])
1071 del m[i]
1071 del m[i]
1072 repo.dirstate.update(m, 'n')
1072 repo.dirstate.update(m, 'n')
1073 repo.dirstate.update(mm, 'n', st_mtime=0)
1073 repo.dirstate.update(mm, 'n', st_mtime=0)
1074 repo.dirstate.forget(forget)
1074 repo.dirstate.forget(forget)
1075
1075
1076 if not msg:
1076 if not msg:
1077 if not message:
1077 if not message:
1078 message = "patch queue: %s\n" % patchfn
1078 message = "patch queue: %s\n" % patchfn
1079 else:
1079 else:
1080 message = "\n".join(message)
1080 message = "\n".join(message)
1081 else:
1081 else:
1082 message = msg
1082 message = msg
1083
1083
1084 self.strip(repo, top, update=False, backup='strip', wlock=wlock)
1084 self.strip(repo, top, update=False, backup='strip', wlock=wlock)
1085 n = repo.commit(filelist, message, changes[1], force=1, wlock=wlock)
1085 n = repo.commit(filelist, message, changes[1], force=1, wlock=wlock)
1086 self.applied[-1] = statusentry(revlog.hex(n), patchfn)
1086 self.applied[-1] = statusentry(revlog.hex(n), patchfn)
1087 self.applied_dirty = 1
1087 self.applied_dirty = 1
1088 else:
1088 else:
1089 self.printdiff(repo, patchparent, fp=patchf)
1089 self.printdiff(repo, patchparent, fp=patchf)
1090 patchf.close()
1090 patchf.close()
1091 added = repo.status()[1]
1091 added = repo.status()[1]
1092 for a in added:
1092 for a in added:
1093 f = repo.wjoin(a)
1093 f = repo.wjoin(a)
1094 try:
1094 try:
1095 os.unlink(f)
1095 os.unlink(f)
1096 except OSError, e:
1096 except OSError, e:
1097 if e.errno != errno.ENOENT:
1097 if e.errno != errno.ENOENT:
1098 raise
1098 raise
1099 try: os.removedirs(os.path.dirname(f))
1099 try: os.removedirs(os.path.dirname(f))
1100 except: pass
1100 except: pass
1101 # forget the file copies in the dirstate
1101 # forget the file copies in the dirstate
1102 # push should readd the files later on
1102 # push should readd the files later on
1103 repo.dirstate.forget(added)
1103 repo.dirstate.forget(added)
1104 self.pop(repo, force=True, wlock=wlock)
1104 self.pop(repo, force=True, wlock=wlock)
1105 self.push(repo, force=True, wlock=wlock)
1105 self.push(repo, force=True, wlock=wlock)
1106
1106
1107 def init(self, repo, create=False):
1107 def init(self, repo, create=False):
1108 if not create and os.path.isdir(self.path):
1108 if not create and os.path.isdir(self.path):
1109 raise util.Abort(_("patch queue directory already exists"))
1109 raise util.Abort(_("patch queue directory already exists"))
1110 try:
1110 try:
1111 os.mkdir(self.path)
1111 os.mkdir(self.path)
1112 except OSError, inst:
1112 except OSError, inst:
1113 if inst.errno != errno.EEXIST or not create:
1113 if inst.errno != errno.EEXIST or not create:
1114 raise
1114 raise
1115 if create:
1115 if create:
1116 return self.qrepo(create=True)
1116 return self.qrepo(create=True)
1117
1117
def unapplied(self, repo, patch=None):
    """Return [(index, name), ...] for pushable, not-yet-applied patches.

    With patch given, only patches after it in the series are listed;
    an unknown patch name aborts.  Guarded patches are skipped, with
    an explanation emitted for each index examined.
    """
    if patch and patch not in self.series:
        raise util.Abort(_("patch %s is not in series file") % patch)
    if patch:
        first = self.series.index(patch) + 1
    else:
        first = self.series_end()
    result = []
    for idx in range(first, len(self.series)):
        ok, reason = self.pushable(idx)
        if ok:
            result.append((idx, self.series[idx]))
        self.explain_pushable(idx)
    return result
1132
1132
def qseries(self, repo, missing=None, start=0, length=0, status=None,
            summary=False):
    """Print a slice of the patch series (or stray files with missing).

    start/length select the series slice; status 'A' takes names from
    the applied stack instead of the series.  With summary, the first
    header line of each patch is appended.  Verbose output prefixes
    each entry with its index and state (A applied, U unapplied,
    G guarded, D not in series).
    """
    def displayname(patchname):
        # optionally append the first line of the patch header
        if not summary:
            return patchname
        headerlines = self.readheaders(patchname)[0]
        if headerlines:
            return patchname + ': ' + headerlines[0]
        return patchname + ': '

    def pname(idx):
        # applied listings pull names from the applied stack
        if status == 'A':
            return self.applied[idx].name
        return self.series[idx]

    applied_names = dict.fromkeys([p.name for p in self.applied])
    if not length:
        length = len(self.series) - start
    if missing:
        # walk the patch directory looking for files the series
        # file does not know about
        stray = []
        for root, dirs, files in os.walk(self.path):
            subdir = root[len(self.path) + 1:]
            for fname in files:
                rel = os.path.join(subdir, fname)
                if (rel not in self.series and
                    rel not in (self.status_path, self.series_path)
                    and not rel.startswith('.')):
                    stray.append(rel)
        stray.sort()
        for rel in stray:
            if self.ui.verbose:
                pfx = 'D '
            else:
                pfx = ''
            self.ui.write("%s%s\n" % (pfx, displayname(rel)))
    else:
        for idx in range(start, start + length):
            pfx = ''
            patch = pname(idx)
            if self.ui.verbose:
                if patch in applied_names:
                    state = 'A'
                elif self.pushable(idx)[0]:
                    state = 'U'
                else:
                    state = 'G'
                pfx = '%d %s ' % (idx, state)
            self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1179
1179
def issaveline(self, l):
    """Return True if status entry l marks a saved queue state.

    Save lines are the synthetic entries written by save() under the
    reserved name '.hg.patches.save.line'.
    """
    # Return an explicit boolean instead of True-or-implicit-None;
    # callers only test truthiness, so this is backward compatible.
    return l.name == '.hg.patches.save.line'
1183
1183
def qrepo(self, create=False):
    """Return the versioned patch-queue repository, or None.

    A repository object is returned only when one already exists in
    the queue directory, or when create is true (in which case it is
    created on the fly).
    """
    if create or os.path.isdir(self.join(".hg")):
        return hg.repository(self.ui, path=self.path, create=create)
    return None
1187
1187
1188 def restore(self, repo, rev, delete=None, qupdate=None):
1188 def restore(self, repo, rev, delete=None, qupdate=None):
1189 c = repo.changelog.read(rev)
1189 c = repo.changelog.read(rev)
1190 desc = c[4].strip()
1190 desc = c[4].strip()
1191 lines = desc.splitlines()
1191 lines = desc.splitlines()
1192 i = 0
1192 i = 0
1193 datastart = None
1193 datastart = None
1194 series = []
1194 series = []
1195 applied = []
1195 applied = []
1196 qpp = None
1196 qpp = None
1197 for i in xrange(0, len(lines)):
1197 for i in xrange(0, len(lines)):
1198 if lines[i] == 'Patch Data:':
1198 if lines[i] == 'Patch Data:':
1199 datastart = i + 1
1199 datastart = i + 1
1200 elif lines[i].startswith('Dirstate:'):
1200 elif lines[i].startswith('Dirstate:'):
1201 l = lines[i].rstrip()
1201 l = lines[i].rstrip()
1202 l = l[10:].split(' ')
1202 l = l[10:].split(' ')
1203 qpp = [ hg.bin(x) for x in l ]
1203 qpp = [ hg.bin(x) for x in l ]
1204 elif datastart != None:
1204 elif datastart != None:
1205 l = lines[i].rstrip()
1205 l = lines[i].rstrip()
1206 se = statusentry(l)
1206 se = statusentry(l)
1207 file_ = se.name
1207 file_ = se.name
1208 if se.rev:
1208 if se.rev:
1209 applied.append(se)
1209 applied.append(se)
1210 else:
1210 else:
1211 series.append(file_)
1211 series.append(file_)
1212 if datastart == None:
1212 if datastart == None:
1213 self.ui.warn("No saved patch data found\n")
1213 self.ui.warn("No saved patch data found\n")
1214 return 1
1214 return 1
1215 self.ui.warn("restoring status: %s\n" % lines[0])
1215 self.ui.warn("restoring status: %s\n" % lines[0])
1216 self.full_series = series
1216 self.full_series = series
1217 self.applied = applied
1217 self.applied = applied
1218 self.parse_series()
1218 self.parse_series()
1219 self.series_dirty = 1
1219 self.series_dirty = 1
1220 self.applied_dirty = 1
1220 self.applied_dirty = 1
1221 heads = repo.changelog.heads()
1221 heads = repo.changelog.heads()
1222 if delete:
1222 if delete:
1223 if rev not in heads:
1223 if rev not in heads:
1224 self.ui.warn("save entry has children, leaving it alone\n")
1224 self.ui.warn("save entry has children, leaving it alone\n")
1225 else:
1225 else:
1226 self.ui.warn("removing save entry %s\n" % hg.short(rev))
1226 self.ui.warn("removing save entry %s\n" % hg.short(rev))
1227 pp = repo.dirstate.parents()
1227 pp = repo.dirstate.parents()
1228 if rev in pp:
1228 if rev in pp:
1229 update = True
1229 update = True
1230 else:
1230 else:
1231 update = False
1231 update = False
1232 self.strip(repo, rev, update=update, backup='strip')
1232 self.strip(repo, rev, update=update, backup='strip')
1233 if qpp:
1233 if qpp:
1234 self.ui.warn("saved queue repository parents: %s %s\n" %
1234 self.ui.warn("saved queue repository parents: %s %s\n" %
1235 (hg.short(qpp[0]), hg.short(qpp[1])))
1235 (hg.short(qpp[0]), hg.short(qpp[1])))
1236 if qupdate:
1236 if qupdate:
1237 print "queue directory updating"
1237 print "queue directory updating"
1238 r = self.qrepo()
1238 r = self.qrepo()
1239 if not r:
1239 if not r:
1240 self.ui.warn("Unable to load queue repository\n")
1240 self.ui.warn("Unable to load queue repository\n")
1241 return 1
1241 return 1
1242 hg.clean(r, qpp[0])
1242 hg.clean(r, qpp[0])
1243
1243
def save(self, repo, msg=None):
    """Snapshot the queue state into a commit in the main repository.

    Records the applied stack, the full series and (when the queue is
    versioned) its dirstate parents in a changeset description, then
    appends the reserved '.hg.patches.save.line' entry to the applied
    stack.  Returns 1 on error.
    """
    if len(self.applied) == 0:
        self.ui.warn("save: no patches applied, exiting\n")
        return 1
    if self.issaveline(self.applied[-1]):
        self.ui.warn("status is already saved\n")
        return 1

    # unapplied series entries are prefixed with ':' in the save data
    ar = [ ':' + x for x in self.full_series ]
    if not msg:
        msg = "hg patches saved state"
    else:
        msg = "hg patches: " + msg.rstrip('\r\n')
    r = self.qrepo()
    if r:
        pp = r.dirstate.parents()
        msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
    msg += "\n\nPatch Data:\n"
    if ar:
        tail = "\n".join(ar) + '\n'
    else:
        tail = ""
    text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + tail
    n = repo.commit(None, text, user=None, force=1)
    if not n:
        self.ui.warn("repo commit failed\n")
        return 1
    self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
    self.applied_dirty = 1
1270
1270
def full_series_end(self):
    """Return the full_series index just past the last applied patch.

    When nothing is applied, the series starts at 0.  If the topmost
    applied patch cannot be located in the series file, the whole
    series is considered applied.
    """
    if not self.applied:
        return 0
    top = self.applied[-1].name
    pos = self.find_series(top)
    if pos is None:
        return len(self.full_series)
    return pos + 1
1279
1279
def series_end(self, all_patches=False):
    """Return the index of the first patch eligible for qpush.

    With all_patches, guards are ignored and the index right after the
    last applied patch is returned.  Returns 0 when the topmost
    applied patch is missing from the series.
    """
    def skip_guarded(idx):
        # advance past guarded patches, explaining each skip
        if all_patches:
            return idx
        while idx < len(self.series):
            ok, reason = self.pushable(idx)
            if ok:
                break
            self.explain_pushable(idx)
            idx += 1
        return idx

    if not self.applied:
        return skip_guarded(0)
    top = self.applied[-1].name
    try:
        pos = self.series.index(top)
    except ValueError:
        return 0
    return skip_guarded(pos + 1)
1301
1301
def appliedname(self, index):
    """Return the display name of the applied patch at index.

    In verbose mode the name is prefixed with its position in the
    series.
    """
    name = self.applied[index].name
    if self.ui.verbose:
        return str(self.series.index(name)) + " " + name
    return name
1309
1309
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    """Add patch files and/or existing revisions to the queue.

    files may contain patch file names (or '-' for stdin); rev names
    changesets to be converted into applied patches.  existing
    registers a file already present in the patch directory, force
    allows overwriting an existing patch, and git selects git-style
    diffs for revision imports.
    """
    def checkseries(patchname):
        if patchname in self.series:
            raise util.Abort(_('patch %s is already in the series file')
                             % patchname)
    def checkfile(patchname):
        if not force and os.path.exists(self.join(patchname)):
            raise util.Abort(_('patch "%s" already exists')
                             % patchname)

    # normalize so len(rev) below cannot blow up on None
    rev = rev or []
    if rev:
        if files:
            raise util.Abort(_('option "-r" not valid when importing '
                               'files'))
        rev = cmdutil.revrange(repo, rev)
        # process revisions from tip towards root (descending order;
        # equivalent to the old cmp-based sort, without py2-only cmp)
        rev.sort()
        rev.reverse()
    if (len(files) > 1 or len(rev) > 1) and patchname:
        raise util.Abort(_('option "-n" not valid when importing multiple '
                           'patches'))
    i = 0
    added = []
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
        if len(heads) > 1:
            raise util.Abort(_('revision %d is the root of more than one '
                               'branch') % rev[-1])
        if self.applied:
            base = revlog.hex(repo.changelog.node(rev[0]))
            if base in [n.rev for n in self.applied]:
                raise util.Abort(_('revision %d is already managed')
                                 % rev[0])
            if heads != [revlog.bin(self.applied[-1].rev)]:
                raise util.Abort(_('revision %d is not the parent of '
                                   'the queue') % rev[0])
            base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev[0])]:
                raise util.Abort(_('revision %d has unmanaged children')
                                 % rev[0])
            lastparent = None

        if git:
            self.diffopts().git = True

        for r in rev:
            p1, p2 = repo.changelog.parentrevs(r)
            n = repo.changelog.node(r)
            if p2 != revlog.nullrev:
                raise util.Abort(_('cannot import merge revision %d') % r)
            if lastparent and lastparent != r:
                raise util.Abort(_('revision %d is not the parent of %d')
                                 % (r, lastparent))
            lastparent = p1

            if not patchname:
                patchname = normname('%d.diff' % r)
            checkseries(patchname)
            checkfile(patchname)
            # imported revisions become the new bottom of the stack
            self.full_series.insert(0, patchname)

            patchf = self.opener(patchname, "w")
            patch.export(repo, [n], fp=patchf, opts=self.diffopts())
            patchf.close()

            se = statusentry(revlog.hex(n), patchname)
            self.applied.insert(0, se)

            added.append(patchname)
            patchname = None
        self.parse_series()
        self.applied_dirty = 1

    for filename in files:
        if existing:
            if filename == '-':
                raise util.Abort(_('-e is incompatible with import from -'))
            if not patchname:
                patchname = normname(filename)
            if not os.path.isfile(self.join(patchname)):
                raise util.Abort(_("patch %s does not exist") % patchname)
        else:
            try:
                if filename == '-':
                    if not patchname:
                        raise util.Abort(_('need --name to import a patch from -'))
                    text = sys.stdin.read()
                else:
                    # close the handle instead of leaking it
                    fp = open(filename)
                    try:
                        text = fp.read()
                    finally:
                        fp.close()
            except IOError:
                # BUG FIX: patchname may still be None here (it is
                # derived from filename only after a successful read),
                # so report the file that actually failed to open.
                raise util.Abort(_("unable to read %s") % filename)
            if not patchname:
                patchname = normname(os.path.basename(filename))
            checkfile(patchname)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
        checkseries(patchname)
        index = self.full_series_end() + i
        self.full_series[index:index] = [patchname]
        self.parse_series()
        self.ui.warn("adding %s to series file\n" % patchname)
        i += 1
        added.append(patchname)
        patchname = None
    self.series_dirty = 1
    qrepo = self.qrepo()
    if qrepo:
        qrepo.add(added)
1422
1422
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    With --rev, mq will stop managing the named revisions. The
    patches must be applied and at the base of the stack. This option
    is useful when the patches have been applied upstream.

    Otherwise, the patches must not be applied.

    With --keep, the patch files are preserved in the patch directory."""
    # delegate to the per-repo queue object, then persist its state
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1437
1437
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    mq = repo.mq
    if patch:
        if patch not in mq.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        # list everything up to and including the named patch
        last = mq.series.index(patch) + 1
    else:
        last = len(mq.applied)
    if not last:
        return
    return mq.qseries(repo, length=last, status='A',
                      summary=opts.get('summary'))
1451
1451
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    mq = repo.mq
    if patch:
        if patch not in mq.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        # start right after the named patch
        first = mq.series.index(patch) + 1
    else:
        first = mq.series_end()
    mq.qseries(repo, start=first, summary=opts.get('summary'))
1462
1462
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format.
    """
    # hand all the command options through to the queue object
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'], rev=opts['rev'],
               existing=opts['existing'], force=opts['force'],
               git=opts['git'])
    mq.save_dirty()
    return 0
1485
1485
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches. Use qcommit to commit changes to this queue
    repository."""
    mq = repo.mq
    r = mq.init(repo, create=opts['create_repo'])
    mq.save_dirty()
    if not r:
        return 0
    # for a versioned queue, seed it with an ignore file and an empty
    # series file, and schedule both for commit
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        fp.write('syntax: glob\n' 'status\n' 'guards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r.add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
1508
1508
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.
    '''
    commands.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = revlog.bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # for a remote destination, clone only up to qbase's
                # parent so no applied patches end up there
                destrev = sr.parents(qbase)[0]
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr, dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
                        dr.url() + '/.hg/patches',
                        pull=opts['pull'],
                        update=not opts['noupdate'],
                        stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # drop the patches we could not avoid cloning
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
1550
1550
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    mq = repo.mq
    r = mq.qrepo()
    if not r:
        raise util.Abort('no queue repository')
    # commit in the nested patch repository, not the main one
    commands.commit(r.ui, r, *pats, **opts)
1557
1557
def series(ui, repo, **opts):
    """print the entire series file"""
    mq = repo.mq
    mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1562
1562
def top(ui, repo, **opts):
    """print the name of the current patch"""
    mq = repo.mq
    applied_count = len(mq.applied)
    if not applied_count:
        ui.write("No patches applied\n")
        return 1
    # the topmost applied patch is the last entry of the stack
    return mq.qseries(repo, start=applied_count - 1, length=1, status='A',
                      summary=opts.get('summary'))
1573
1573
def next(ui, repo, **opts):
    """print the name of the next patch"""
    mq = repo.mq
    pos = mq.series_end()
    if pos == len(mq.series):
        ui.write("All patches applied\n")
        return 1
    return mq.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1582
1582
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    mq = repo.mq
    count = len(mq.applied)
    if not count:
        ui.write("No patches applied\n")
        return 1
    if count == 1:
        ui.write("Only one patch applied\n")
        return 1
    # the patch below the current top
    return mq.qseries(repo, start=count - 2, length=1, status='A',
                      summary=opts.get('summary'))
1595
1595
def new(ui, repo, patch, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is 'New patch: PATCH'"""
    q = repo.mq
    # -m/-l supply the message; -e opens an editor seeded with it.
    msg = commands.logmessage(opts)
    if opts['edit']:
        msg = ui.edit(msg, ui.username())
    q.new(repo, patch, msg=msg, force=opts['force'])
    q.save_dirty()
    return 0
1614
1614
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    q = repo.mq
    newmsg = commands.logmessage(opts)
    if opts['edit']:
        # -e is exclusive with -m/-l: the editor is seeded from the patch.
        if newmsg:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        toppatch = q.applied[-1].name
        (message, comment, user, date, hasdiff) = q.readheaders(toppatch)
        newmsg = ui.edit('\n'.join(message), user or ui.username())
    ret = q.refresh(repo, pats, msg=newmsg, **opts)
    q.save_dirty()
    return ret
1636
1636
def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1641
1641
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = commands.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # Fix: actually skip a duplicate/current patch.  The old code
            # printed this warning but still appended p, so the same patch
            # was folded twice.  Also terminate the warning with a newline.
            ui.warn(_('Skipping already folded patch %s\n') % p)
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # no -m/-l: collect each folded patch's header for the
            # combined commit message built below
            messages.append(q.readheaders(p)[0])
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # concatenate the parent header and each folded header,
        # separated by '* * *' lines
        message, comments, user = q.readheaders(parent)[0:3]
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1700
1700
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    To set a negative guard "-foo" on topmost patch ("--" is needed so
    hg will not interpret "-foo" as an option):
        hg qguard -- -foo

    To set guards on another patch:
        hg qguard other.patch +2.6.17 -stable
    '''
    q = repo.mq

    def showguards(idx):
        # a patch with an empty guard list is reported as 'unguarded'
        active = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(active)))

    patch = None
    args = list(args)
    if opts['list']:
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for idx in xrange(len(q.series)):
            showguards(idx)
        return
    # no explicit patch name (or first arg looks like a guard): act on
    # the topmost applied patch
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        showguards(q.series.index(q.lookup(patch)))
1745
1748
def header(ui, repo, patch=None):
    """Print the header of the topmost or specified patch"""
    q = repo.mq
    if patch:
        patch = q.lookup(patch)
    else:
        # default to the top applied patch
        if not q.applied:
            ui.write('No patches applied\n')
            return 1
        patch = q.lookup('qtip')
    message = q.readheaders(patch)[0]
    ui.write('\n'.join(message) + '\n')
1760
1763
def lastsavename(path):
    """Return (filename, index) of the highest-numbered save of path.

    Saved queues are named "<path>.N"; scan path's directory for the
    entry with the largest N.  Returns (None, None) when no saved
    queue exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # Fix: escape the base name and anchor the pattern.  The original
    # "%s.([0-9]+)" left the dot (and any regex metacharacters in base)
    # unescaped, so e.g. "patchesX3" matched, and without an anchor
    # "patches.3.bak" was accepted as save number 3.
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
1777
1780
def savename(path):
    """Return the name the next queue save of path should use."""
    last, index = lastsavename(path)
    # no previous save: start the numbering at 1
    if last is None:
        index = 0
    return "%s.%d" % (path, index + 1)
1786 return newpath
1784
1787
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack"""
    q = repo.mq
    mergeq = None

    if opts['all']:
        # -a: push up to the last patch in the series
        if not q.series:
            ui.warn(_('no patches in series\n'))
            return 0
        patch = q.series[-1]
    if opts['merge']:
        # merge against a saved queue: -n names it, otherwise use the
        # most recently saved one
        if opts['name']:
            savedpath = opts['name']
        else:
            savedpath, i = lastsavename(q.path)
        if not savedpath:
            ui.warn("no saved queues found, please use -n\n")
            return 1
        mergeq = queue(ui, repo.join(""), savedpath)
        ui.warn("merging with queue at: %s\n" % mergeq.path)
    ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
                 mergeq=mergeq)
    q.save_dirty()
    return ret
1809
1812
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack"""
    # -n operates on an alternate queue without touching the working dir
    if opts['name']:
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn('using patch queue: %s\n' % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
1823
1826
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""
    q = repo.mq

    # single argument: it is the destination, source is the top patch
    if not name:
        name, patch = patch, None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('No patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into a directory keeps the original basename
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('Renaming %s to %s\n' % (patch, name))
    # rewrite the series entry, preserving any guards attached to it
    idx = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[idx])
    q.full_series[idx] = name + ''.join(' #' + g for g in guards)
    q.parse_series()
    q.series_dirty = 1

    # if the patch is applied, fix up the status file entry too
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # mirror the rename in the versioned patch repository
        wlock = r.wlock()
        if r.dirstate.state(name) == 'r':
            r.undelete([name], wlock)
        r.copy(patch, name, wlock)
        r.remove([patch], False, wlock)

    q.save_dirty()
1876
1879
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a rev"""
    q = repo.mq
    q.restore(repo, repo.lookup(rev), delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
1885
1888
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = commands.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn("copy %s to %s\n" % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # Fix: the original bare "except:" also swallowed
            # KeyboardInterrupt/SystemExit.  Only a missing/unremovable
            # status file is best-effort here.
            pass
    return 0
1915
1918
def strip(ui, repo, rev, **opts):
    """strip a revision and all later revs on the same branch"""
    # --backup keeps only the stripped revs; --nobackup keeps nothing
    backup = 'all'
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    # only update the working dir if it is not already at the null rev
    update = repo.dirstate.parents()[0] != revlog.nullid
    repo.mq.strip(repo, repo.lookup(rev), backup=backup, update=update)
    return 0
1927
1930
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if it
    has no guards or any positive guards match the currently selected guard,
    but will not be pushed if any negative guards match the current guard.
    For example:

    qguard foo.patch -stable (negative guard)
    qguard bar.patch +stable (positive guard)
    qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it
    has a positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are skipped
    and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last applied
    patch that is not guarded. Use --reapply (which implies --pop) to push
    back to the current patch afterwards, but skip guarded patches.

    Use -s/--series to print a list of all guards in the series file (no
    other arguments needed). Use -v for more information.'''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # changing the active guard set: snapshot unapplied/guarded counts
        # first so we can report how the selection changed things
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            # not popping: just report the effect of the new selection
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s: tally how many series entries carry each guard
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # sort by guard name, ignoring the leading +/- sign
        guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # no arguments: print the currently active guards
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # remember the current top patch so --reapply can push back to it
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        # pop down to just below the first applied patch that is now guarded
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i-1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            # the pop already changed queue state; always persist it
            q.save_dirty()
2029
2032
def reposetup(ui, repo):
    """Wrap the repository class so core commands respect mq state.

    Installs an mqrepo subclass on local repos and attaches the patch
    queue as repo.mq.
    """
    class mqrepo(repo.__class__):
        def abort_if_wdir_patched(self, errmsg, force=False):
            # refuse the operation when the working dir parent is an
            # applied mq patch (unless forced)
            if self.mq.applied and not force:
                parent = revlog.hex(self.dirstate.parents()[0])
                if parent in [s.rev for s in self.mq.applied]:
                    raise util.Abort(errmsg)

        def commit(self, *args, **opts):
            # 'force' may arrive positionally (6th arg) or as a keyword
            if len(args) >= 6:
                force = args[5]
            else:
                force = opts.get('force')
            self.abort_if_wdir_patched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(*args, **opts)

        def push(self, remote, force=False, revs=None):
            # pushing applied mq patches would publish them; require
            # --force or an explicit rev list
            if self.mq.applied and not force and not revs:
                raise util.Abort(_('source has mq patches applied'))
            return super(mqrepo, self).push(remote, force, revs)

        def tags(self):
            if self.tagscache:
                return self.tagscache

            tagscache = super(mqrepo, self).tags()

            q = self.mq
            if not q.applied:
                return tagscache

            # expose each applied patch (plus qtip/qbase) as a tag,
            # without overriding real tags of the same name
            mqtags = [(patch.rev, patch.name) for patch in q.applied]
            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            for patch in mqtags:
                if patch[1] in tagscache:
                    self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
                else:
                    tagscache[patch[1]] = revlog.bin(patch[0])

            return tagscache

        def _branchtags(self):
            q = self.mq
            if not q.applied:
                return super(mqrepo, self)._branchtags()

            self.branchcache = {} # avoid recursion in changectx
            cl = self.changelog
            partial, last, lrev = self._readbranchcache()

            qbase = cl.rev(revlog.bin(q.applied[0].rev))
            start = lrev + 1
            if start < qbase:
                # update the cache (excluding the patches) and save it
                self._updatebranchcache(partial, lrev+1, qbase)
                self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
                start = qbase
            # if start = qbase, the cache is as updated as it should be.
            # if start > qbase, the cache includes (part of) the patches.
            # we might as well use it, but we won't save it.

            # update the cache up to the tip
            self._updatebranchcache(partial, start, cl.count())

            return partial

    if repo.local():
        # only local repos get mq support
        repo.__class__ = mqrepo
        repo.mq = queue(ui, repo.join(""))
2103
2106
2104 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2107 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2105
2108
2106 cmdtable = {
2109 cmdtable = {
2107 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2110 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2108 "qclone": (clone,
2111 "qclone": (clone,
2109 [('', 'pull', None, _('use pull protocol to copy metadata')),
2112 [('', 'pull', None, _('use pull protocol to copy metadata')),
2110 ('U', 'noupdate', None, _('do not update the new working directories')),
2113 ('U', 'noupdate', None, _('do not update the new working directories')),
2111 ('', 'uncompressed', None,
2114 ('', 'uncompressed', None,
2112 _('use uncompressed transfer (fast over LAN)')),
2115 _('use uncompressed transfer (fast over LAN)')),
2113 ('e', 'ssh', '', _('specify ssh command to use')),
2116 ('e', 'ssh', '', _('specify ssh command to use')),
2114 ('p', 'patches', '', _('location of source patch repo')),
2117 ('p', 'patches', '', _('location of source patch repo')),
2115 ('', 'remotecmd', '',
2118 ('', 'remotecmd', '',
2116 _('specify hg command to run on the remote side'))],
2119 _('specify hg command to run on the remote side'))],
2117 'hg qclone [OPTION]... SOURCE [DEST]'),
2120 'hg qclone [OPTION]... SOURCE [DEST]'),
2118 "qcommit|qci":
2121 "qcommit|qci":
2119 (commit,
2122 (commit,
2120 commands.table["^commit|ci"][1],
2123 commands.table["^commit|ci"][1],
2121 'hg qcommit [OPTION]... [FILE]...'),
2124 'hg qcommit [OPTION]... [FILE]...'),
2122 "^qdiff": (diff,
2125 "^qdiff": (diff,
2123 [('g', 'git', None, _('use git extended diff format')),
2126 [('g', 'git', None, _('use git extended diff format')),
2124 ('I', 'include', [], _('include names matching the given patterns')),
2127 ('I', 'include', [], _('include names matching the given patterns')),
2125 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2128 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2126 'hg qdiff [-I] [-X] [FILE]...'),
2129 'hg qdiff [-I] [-X] [FILE]...'),
2127 "qdelete|qremove|qrm":
2130 "qdelete|qremove|qrm":
2128 (delete,
2131 (delete,
2129 [('k', 'keep', None, _('keep patch file')),
2132 [('k', 'keep', None, _('keep patch file')),
2130 ('r', 'rev', [], _('stop managing a revision'))],
2133 ('r', 'rev', [], _('stop managing a revision'))],
2131 'hg qdelete [-k] [-r REV]... PATCH...'),
2134 'hg qdelete [-k] [-r REV]... PATCH...'),
2132 'qfold':
2135 'qfold':
2133 (fold,
2136 (fold,
2134 [('e', 'edit', None, _('edit patch header')),
2137 [('e', 'edit', None, _('edit patch header')),
2135 ('k', 'keep', None, _('keep folded patch files'))
2138 ('k', 'keep', None, _('keep folded patch files'))
2136 ] + commands.commitopts,
2139 ] + commands.commitopts,
2137 'hg qfold [-e] [-m <text>] [-l <file] PATCH...'),
2140 'hg qfold [-e] [-m <text>] [-l <file] PATCH...'),
2138 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2141 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2139 ('n', 'none', None, _('drop all guards'))],
2142 ('n', 'none', None, _('drop all guards'))],
2140 'hg qguard [PATCH] [+GUARD...] [-GUARD...]'),
2143 'hg qguard [PATCH] [+GUARD...] [-GUARD...]'),
2141 'qheader': (header, [],
2144 'qheader': (header, [],
2142 _('hg qheader [PATCH]')),
2145 _('hg qheader [PATCH]')),
2143 "^qimport":
2146 "^qimport":
2144 (qimport,
2147 (qimport,
2145 [('e', 'existing', None, 'import file in patch dir'),
2148 [('e', 'existing', None, 'import file in patch dir'),
2146 ('n', 'name', '', 'patch file name'),
2149 ('n', 'name', '', 'patch file name'),
2147 ('f', 'force', None, 'overwrite existing files'),
2150 ('f', 'force', None, 'overwrite existing files'),
2148 ('r', 'rev', [], 'place existing revisions under mq control'),
2151 ('r', 'rev', [], 'place existing revisions under mq control'),
2149 ('g', 'git', None, _('use git extended diff format'))],
2152 ('g', 'git', None, _('use git extended diff format'))],
2150 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2153 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2151 "^qinit":
2154 "^qinit":
2152 (init,
2155 (init,
2153 [('c', 'create-repo', None, 'create queue repository')],
2156 [('c', 'create-repo', None, 'create queue repository')],
2154 'hg qinit [-c]'),
2157 'hg qinit [-c]'),
2155 "qnew":
2158 "qnew":
2156 (new,
2159 (new,
2157 [('e', 'edit', None, _('edit commit message')),
2160 [('e', 'edit', None, _('edit commit message')),
2158 ('f', 'force', None, _('import uncommitted changes into patch'))
2161 ('f', 'force', None, _('import uncommitted changes into patch'))
2159 ] + commands.commitopts,
2162 ] + commands.commitopts,
2160 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2163 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2161 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2164 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2162 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2165 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2163 "^qpop":
2166 "^qpop":
2164 (pop,
2167 (pop,
2165 [('a', 'all', None, 'pop all patches'),
2168 [('a', 'all', None, 'pop all patches'),
2166 ('n', 'name', '', 'queue name to pop'),
2169 ('n', 'name', '', 'queue name to pop'),
2167 ('f', 'force', None, 'forget any local changes')],
2170 ('f', 'force', None, 'forget any local changes')],
2168 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2171 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2169 "^qpush":
2172 "^qpush":
2170 (push,
2173 (push,
2171 [('f', 'force', None, 'apply if the patch has rejects'),
2174 [('f', 'force', None, 'apply if the patch has rejects'),
2172 ('l', 'list', None, 'list patch name in commit text'),
2175 ('l', 'list', None, 'list patch name in commit text'),
2173 ('a', 'all', None, 'apply all patches'),
2176 ('a', 'all', None, 'apply all patches'),
2174 ('m', 'merge', None, 'merge from another queue'),
2177 ('m', 'merge', None, 'merge from another queue'),
2175 ('n', 'name', '', 'merge queue name')],
2178 ('n', 'name', '', 'merge queue name')],
2176 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2179 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2177 "^qrefresh":
2180 "^qrefresh":
2178 (refresh,
2181 (refresh,
2179 [('e', 'edit', None, _('edit commit message')),
2182 [('e', 'edit', None, _('edit commit message')),
2180 ('g', 'git', None, _('use git extended diff format')),
2183 ('g', 'git', None, _('use git extended diff format')),
2181 ('s', 'short', None, 'refresh only files already in the patch'),
2184 ('s', 'short', None, 'refresh only files already in the patch'),
2182 ('I', 'include', [], _('include names matching the given patterns')),
2185 ('I', 'include', [], _('include names matching the given patterns')),
2183 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2186 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2184 ] + commands.commitopts,
2187 ] + commands.commitopts,
2185 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] FILES...'),
2188 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] FILES...'),
2186 'qrename|qmv':
2189 'qrename|qmv':
2187 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2190 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2188 "qrestore":
2191 "qrestore":
2189 (restore,
2192 (restore,
2190 [('d', 'delete', None, 'delete save entry'),
2193 [('d', 'delete', None, 'delete save entry'),
2191 ('u', 'update', None, 'update queue working dir')],
2194 ('u', 'update', None, 'update queue working dir')],
2192 'hg qrestore [-d] [-u] REV'),
2195 'hg qrestore [-d] [-u] REV'),
2193 "qsave":
2196 "qsave":
2194 (save,
2197 (save,
2195 [('c', 'copy', None, 'copy patch directory'),
2198 [('c', 'copy', None, 'copy patch directory'),
2196 ('n', 'name', '', 'copy directory name'),
2199 ('n', 'name', '', 'copy directory name'),
2197 ('e', 'empty', None, 'clear queue status file'),
2200 ('e', 'empty', None, 'clear queue status file'),
2198 ('f', 'force', None, 'force copy')] + commands.commitopts,
2201 ('f', 'force', None, 'force copy')] + commands.commitopts,
2199 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2202 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2200 "qselect": (select,
2203 "qselect": (select,
2201 [('n', 'none', None, _('disable all guards')),
2204 [('n', 'none', None, _('disable all guards')),
2202 ('s', 'series', None, _('list all guards in series file')),
2205 ('s', 'series', None, _('list all guards in series file')),
2203 ('', 'pop', None,
2206 ('', 'pop', None,
2204 _('pop to before first guarded applied patch')),
2207 _('pop to before first guarded applied patch')),
2205 ('', 'reapply', None, _('pop, then reapply patches'))],
2208 ('', 'reapply', None, _('pop, then reapply patches'))],
2206 'hg qselect [OPTION...] [GUARD...]'),
2209 'hg qselect [OPTION...] [GUARD...]'),
2207 "qseries":
2210 "qseries":
2208 (series,
2211 (series,
2209 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2212 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2210 'hg qseries [-ms]'),
2213 'hg qseries [-ms]'),
2211 "^strip":
2214 "^strip":
2212 (strip,
2215 (strip,
2213 [('f', 'force', None, 'force multi-head removal'),
2216 [('f', 'force', None, 'force multi-head removal'),
2214 ('b', 'backup', None, 'bundle unrelated changesets'),
2217 ('b', 'backup', None, 'bundle unrelated changesets'),
2215 ('n', 'nobackup', None, 'no backups')],
2218 ('n', 'nobackup', None, 'no backups')],
2216 'hg strip [-f] [-b] [-n] REV'),
2219 'hg strip [-f] [-b] [-n] REV'),
2217 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2220 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2218 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2221 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2219 }
2222 }
@@ -1,373 +1,373 b''
1 /*
1 /*
2 bdiff.c - efficient binary diff extension for Mercurial
2 bdiff.c - efficient binary diff extension for Mercurial
3
3
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms of
6 This software may be used and distributed according to the terms of
7 the GNU General Public License, incorporated herein by reference.
7 the GNU General Public License, incorporated herein by reference.
8
8
9 Based roughly on Python difflib
9 Based roughly on Python difflib
10 */
10 */
11
11
12 #include <Python.h>
12 #include <Python.h>
13 #include <stdlib.h>
13 #include <stdlib.h>
14 #include <string.h>
14 #include <string.h>
15
15
16 #if defined __hpux || defined __SUNPRO_C || defined _AIX
16 #if defined __hpux || defined __SUNPRO_C || defined _AIX
17 # define inline
17 # define inline
18 #endif
18 #endif
19
19
20 #ifdef _WIN32
20 #ifdef _WIN32
21 #ifdef _MSC_VER
21 #ifdef _MSC_VER
22 #define inline __inline
22 #define inline __inline
23 typedef unsigned long uint32_t;
23 typedef unsigned long uint32_t;
24 #else
24 #else
25 #include <stdint.h>
25 #include <stdint.h>
26 #endif
26 #endif
27 static uint32_t htonl(uint32_t x)
27 static uint32_t htonl(uint32_t x)
28 {
28 {
29 return ((x & 0x000000ffUL) << 24) |
29 return ((x & 0x000000ffUL) << 24) |
30 ((x & 0x0000ff00UL) << 8) |
30 ((x & 0x0000ff00UL) << 8) |
31 ((x & 0x00ff0000UL) >> 8) |
31 ((x & 0x00ff0000UL) >> 8) |
32 ((x & 0xff000000UL) >> 24);
32 ((x & 0xff000000UL) >> 24);
33 }
33 }
34 #else
34 #else
35 #include <sys/types.h>
35 #include <sys/types.h>
36 #ifdef __BEOS__
36 #ifdef __BEOS__
37 #include <ByteOrder.h>
37 #include <ByteOrder.h>
38 #else
38 #else
39 #include <arpa/inet.h>
39 #include <arpa/inet.h>
40 #endif
40 #endif
41 #include <inttypes.h>
41 #include <inttypes.h>
42 #endif
42 #endif
43
43
44 struct line {
44 struct line {
45 int h, len, n, e;
45 int h, len, n, e;
46 const char *l;
46 const char *l;
47 };
47 };
48
48
49 struct pos {
49 struct pos {
50 int pos, len;
50 int pos, len;
51 };
51 };
52
52
53 struct hunk {
53 struct hunk {
54 int a1, a2, b1, b2;
54 int a1, a2, b1, b2;
55 };
55 };
56
56
57 struct hunklist {
57 struct hunklist {
58 struct hunk *base, *head;
58 struct hunk *base, *head;
59 };
59 };
60
60
61 static inline uint32_t rol32(uint32_t word, unsigned int shift)
61 static inline uint32_t rol32(uint32_t word, unsigned int shift)
62 {
62 {
63 return (word << shift) | (word >> (32 - shift));
63 return (word << shift) | (word >> (32 - shift));
64 }
64 }
65
65
66 int splitlines(const char *a, int len, struct line **lr)
66 int splitlines(const char *a, int len, struct line **lr)
67 {
67 {
68 int g, h, i;
68 int g, h, i;
69 const char *p, *b = a;
69 const char *p, *b = a;
70 struct line *l;
70 struct line *l;
71
71
72 /* count the lines */
72 /* count the lines */
73 i = 1; /* extra line for sentinel */
73 i = 1; /* extra line for sentinel */
74 for (p = a; p < a + len; p++)
74 for (p = a; p < a + len; p++)
75 if (*p == '\n' || p == a + len - 1)
75 if (*p == '\n' || p == a + len - 1)
76 i++;
76 i++;
77
77
78 *lr = l = (struct line *)malloc(sizeof(struct line) * i);
78 *lr = l = (struct line *)malloc(sizeof(struct line) * i);
79 if (!l)
79 if (!l)
80 return -1;
80 return -1;
81
81
82 /* build the line array and calculate hashes */
82 /* build the line array and calculate hashes */
83 h = 0;
83 h = 0;
84 for (p = a; p < a + len; p++) {
84 for (p = a; p < a + len; p++) {
85 /*
85 /*
86 * a simple hash from GNU diff, with better collision
86 * a simple hash from GNU diff, with better collision
87 * resistance from hashpjw. this slows down common
87 * resistance from hashpjw. this slows down common
88 * case by 10%, but speeds up worst case by 100x.
88 * case by 10%, but speeds up worst case by 100x.
89 */
89 */
90 h = *p + rol32(h, 7);
90 h = *p + rol32(h, 7);
91 if ((g = h & 0xf0000000)) {
91 if ((g = h & 0xf0000000)) {
92 h ^= g >> 24;
92 h ^= g >> 24;
93 h ^= g;
93 h ^= g;
94 }
94 }
95 if (*p == '\n' || p == a + len - 1) {
95 if (*p == '\n' || p == a + len - 1) {
96 l->len = p - b + 1;
96 l->len = p - b + 1;
97 l->h = h * l->len;
97 l->h = h * l->len;
98 l->l = b;
98 l->l = b;
99 l->n = -1;
99 l->n = -1;
100 l++;
100 l++;
101 b = p + 1;
101 b = p + 1;
102 h = 0;
102 h = 0;
103 }
103 }
104 }
104 }
105
105
106 /* set up a sentinel */
106 /* set up a sentinel */
107 l->h = l->len = 0;
107 l->h = l->len = 0;
108 l->l = a + len;
108 l->l = a + len;
109 return i - 1;
109 return i - 1;
110 }
110 }
111
111
112 int inline cmp(struct line *a, struct line *b)
112 int inline cmp(struct line *a, struct line *b)
113 {
113 {
114 return a->h != b->h || a->len != b->len || memcmp(a->l, b->l, a->len);
114 return a->h != b->h || a->len != b->len || memcmp(a->l, b->l, a->len);
115 }
115 }
116
116
117 static int equatelines(struct line *a, int an, struct line *b, int bn)
117 static int equatelines(struct line *a, int an, struct line *b, int bn)
118 {
118 {
119 int i, j, buckets = 1, t;
119 int i, j, buckets = 1, t;
120 struct pos *h;
120 struct pos *h;
121
121
122 /* build a hash table of the next highest power of 2 */
122 /* build a hash table of the next highest power of 2 */
123 while (buckets < bn + 1)
123 while (buckets < bn + 1)
124 buckets *= 2;
124 buckets *= 2;
125
125
126 h = (struct pos *)malloc(buckets * sizeof(struct pos));
126 h = (struct pos *)malloc(buckets * sizeof(struct pos));
127 buckets = buckets - 1;
127 buckets = buckets - 1;
128 if (!h)
128 if (!h)
129 return 0;
129 return 0;
130
130
131 /* clear the hash table */
131 /* clear the hash table */
132 for (i = 0; i <= buckets; i++) {
132 for (i = 0; i <= buckets; i++) {
133 h[i].pos = -1;
133 h[i].pos = -1;
134 h[i].len = 0;
134 h[i].len = 0;
135 }
135 }
136
136
137 /* add lines to the hash table chains */
137 /* add lines to the hash table chains */
138 for (i = bn - 1; i >= 0; i--) {
138 for (i = bn - 1; i >= 0; i--) {
139 /* find the equivalence class */
139 /* find the equivalence class */
140 for (j = b[i].h & buckets; h[j].pos != -1;
140 for (j = b[i].h & buckets; h[j].pos != -1;
141 j = (j + 1) & buckets)
141 j = (j + 1) & buckets)
142 if (!cmp(b + i, b + h[j].pos))
142 if (!cmp(b + i, b + h[j].pos))
143 break;
143 break;
144
144
145 /* add to the head of the equivalence class */
145 /* add to the head of the equivalence class */
146 b[i].n = h[j].pos;
146 b[i].n = h[j].pos;
147 b[i].e = j;
147 b[i].e = j;
148 h[j].pos = i;
148 h[j].pos = i;
149 h[j].len++; /* keep track of popularity */
149 h[j].len++; /* keep track of popularity */
150 }
150 }
151
151
152 /* compute popularity threshold */
152 /* compute popularity threshold */
153 t = (bn >= 200) ? bn / 100 : bn + 1;
153 t = (bn >= 200) ? bn / 100 : bn + 1;
154
154
155 /* match items in a to their equivalence class in b */
155 /* match items in a to their equivalence class in b */
156 for (i = 0; i < an; i++) {
156 for (i = 0; i < an; i++) {
157 /* find the equivalence class */
157 /* find the equivalence class */
158 for (j = a[i].h & buckets; h[j].pos != -1;
158 for (j = a[i].h & buckets; h[j].pos != -1;
159 j = (j + 1) & buckets)
159 j = (j + 1) & buckets)
160 if (!cmp(a + i, b + h[j].pos))
160 if (!cmp(a + i, b + h[j].pos))
161 break;
161 break;
162
162
163 a[i].e = j; /* use equivalence class for quick compare */
163 a[i].e = j; /* use equivalence class for quick compare */
164 if (h[j].len <= t)
164 if (h[j].len <= t)
165 a[i].n = h[j].pos; /* point to head of match list */
165 a[i].n = h[j].pos; /* point to head of match list */
166 else
166 else
167 a[i].n = -1; /* too popular */
167 a[i].n = -1; /* too popular */
168 }
168 }
169
169
170 /* discard hash tables */
170 /* discard hash tables */
171 free(h);
171 free(h);
172 return 1;
172 return 1;
173 }
173 }
174
174
175 static int longest_match(struct line *a, struct line *b, struct pos *pos,
175 static int longest_match(struct line *a, struct line *b, struct pos *pos,
176 int a1, int a2, int b1, int b2, int *omi, int *omj)
176 int a1, int a2, int b1, int b2, int *omi, int *omj)
177 {
177 {
178 int mi = a1, mj = b1, mk = 0, mb = 0, i, j, k;
178 int mi = a1, mj = b1, mk = 0, mb = 0, i, j, k;
179
179
180 for (i = a1; i < a2; i++) {
180 for (i = a1; i < a2; i++) {
181 /* skip things before the current block */
181 /* skip things before the current block */
182 for (j = a[i].n; j != -1 && j < b1; j = b[j].n)
182 for (j = a[i].n; j != -1 && j < b1; j = b[j].n)
183 ;
183 ;
184
184
185 /* loop through all lines match a[i] in b */
185 /* loop through all lines match a[i] in b */
186 for (; j != -1 && j < b2; j = b[j].n) {
186 for (; j != -1 && j < b2; j = b[j].n) {
187 /* does this extend an earlier match? */
187 /* does this extend an earlier match? */
188 if (i > a1 && j > b1 && pos[j - 1].pos == i - 1)
188 if (i > a1 && j > b1 && pos[j - 1].pos == i - 1)
189 k = pos[j - 1].len + 1;
189 k = pos[j - 1].len + 1;
190 else
190 else
191 k = 1;
191 k = 1;
192 pos[j].pos = i;
192 pos[j].pos = i;
193 pos[j].len = k;
193 pos[j].len = k;
194
194
195 /* best match so far? */
195 /* best match so far? */
196 if (k > mk) {
196 if (k > mk) {
197 mi = i;
197 mi = i;
198 mj = j;
198 mj = j;
199 mk = k;
199 mk = k;
200 }
200 }
201 }
201 }
202 }
202 }
203
203
204 if (mk) {
204 if (mk) {
205 mi = mi - mk + 1;
205 mi = mi - mk + 1;
206 mj = mj - mk + 1;
206 mj = mj - mk + 1;
207 }
207 }
208
208
209 /* expand match to include neighboring popular lines */
209 /* expand match to include neighboring popular lines */
210 while (mi - mb > a1 && mj - mb > b1 &&
210 while (mi - mb > a1 && mj - mb > b1 &&
211 a[mi - mb - 1].e == b[mj - mb - 1].e)
211 a[mi - mb - 1].e == b[mj - mb - 1].e)
212 mb++;
212 mb++;
213 while (mi + mk < a2 && mj + mk < b2 &&
213 while (mi + mk < a2 && mj + mk < b2 &&
214 a[mi + mk].e == b[mj + mk].e)
214 a[mi + mk].e == b[mj + mk].e)
215 mk++;
215 mk++;
216
216
217 *omi = mi - mb;
217 *omi = mi - mb;
218 *omj = mj - mb;
218 *omj = mj - mb;
219 return mk + mb;
219 return mk + mb;
220 }
220 }
221
221
222 static void recurse(struct line *a, struct line *b, struct pos *pos,
222 static void recurse(struct line *a, struct line *b, struct pos *pos,
223 int a1, int a2, int b1, int b2, struct hunklist *l)
223 int a1, int a2, int b1, int b2, struct hunklist *l)
224 {
224 {
225 int i, j, k;
225 int i, j, k;
226
226
227 /* find the longest match in this chunk */
227 /* find the longest match in this chunk */
228 k = longest_match(a, b, pos, a1, a2, b1, b2, &i, &j);
228 k = longest_match(a, b, pos, a1, a2, b1, b2, &i, &j);
229 if (!k)
229 if (!k)
230 return;
230 return;
231
231
232 /* and recurse on the remaining chunks on either side */
232 /* and recurse on the remaining chunks on either side */
233 recurse(a, b, pos, a1, i, b1, j, l);
233 recurse(a, b, pos, a1, i, b1, j, l);
234 l->head->a1 = i;
234 l->head->a1 = i;
235 l->head->a2 = i + k;
235 l->head->a2 = i + k;
236 l->head->b1 = j;
236 l->head->b1 = j;
237 l->head->b2 = j + k;
237 l->head->b2 = j + k;
238 l->head++;
238 l->head++;
239 recurse(a, b, pos, i + k, a2, j + k, b2, l);
239 recurse(a, b, pos, i + k, a2, j + k, b2, l);
240 }
240 }
241
241
242 static struct hunklist diff(struct line *a, int an, struct line *b, int bn)
242 static struct hunklist diff(struct line *a, int an, struct line *b, int bn)
243 {
243 {
244 struct hunklist l;
244 struct hunklist l;
245 struct pos *pos;
245 struct pos *pos;
246 int t;
246 int t;
247
247
248 /* allocate and fill arrays */
248 /* allocate and fill arrays */
249 t = equatelines(a, an, b, bn);
249 t = equatelines(a, an, b, bn);
250 pos = (struct pos *)calloc(bn, sizeof(struct pos));
250 pos = (struct pos *)calloc(bn, sizeof(struct pos));
251 /* we can't have more matches than lines in the shorter file */
251 /* we can't have more matches than lines in the shorter file */
252 l.head = l.base = (struct hunk *)malloc(sizeof(struct hunk) *
252 l.head = l.base = (struct hunk *)malloc(sizeof(struct hunk) *
253 ((an<bn ? an:bn) + 1));
253 ((an<bn ? an:bn) + 1));
254
254
255 if (pos && l.base && t) {
255 if (pos && l.base && t) {
256 /* generate the matching block list */
256 /* generate the matching block list */
257 recurse(a, b, pos, 0, an, 0, bn, &l);
257 recurse(a, b, pos, 0, an, 0, bn, &l);
258 l.head->a1 = an;
258 l.head->a1 = l.head->a2 = an;
259 l.head->b1 = bn;
259 l.head->b1 = l.head->b2 = bn;
260 l.head++;
260 l.head++;
261 }
261 }
262
262
263 free(pos);
263 free(pos);
264 return l;
264 return l;
265 }
265 }
266
266
267 static PyObject *blocks(PyObject *self, PyObject *args)
267 static PyObject *blocks(PyObject *self, PyObject *args)
268 {
268 {
269 PyObject *sa, *sb, *rl = NULL, *m;
269 PyObject *sa, *sb, *rl = NULL, *m;
270 struct line *a, *b;
270 struct line *a, *b;
271 struct hunklist l = {NULL, NULL};
271 struct hunklist l = {NULL, NULL};
272 struct hunk *h;
272 struct hunk *h;
273 int an, bn, pos = 0;
273 int an, bn, pos = 0;
274
274
275 if (!PyArg_ParseTuple(args, "SS:bdiff", &sa, &sb))
275 if (!PyArg_ParseTuple(args, "SS:bdiff", &sa, &sb))
276 return NULL;
276 return NULL;
277
277
278 an = splitlines(PyString_AsString(sa), PyString_Size(sa), &a);
278 an = splitlines(PyString_AsString(sa), PyString_Size(sa), &a);
279 bn = splitlines(PyString_AsString(sb), PyString_Size(sb), &b);
279 bn = splitlines(PyString_AsString(sb), PyString_Size(sb), &b);
280 if (!a || !b)
280 if (!a || !b)
281 goto nomem;
281 goto nomem;
282
282
283 l = diff(a, an, b, bn);
283 l = diff(a, an, b, bn);
284 rl = PyList_New(l.head - l.base);
284 rl = PyList_New(l.head - l.base);
285 if (!l.head || !rl)
285 if (!l.head || !rl)
286 goto nomem;
286 goto nomem;
287
287
288 for (h = l.base; h != l.head; h++) {
288 for (h = l.base; h != l.head; h++) {
289 m = Py_BuildValue("iiii", h->a1, h->a2, h->b1, h->b2);
289 m = Py_BuildValue("iiii", h->a1, h->a2, h->b1, h->b2);
290 PyList_SetItem(rl, pos, m);
290 PyList_SetItem(rl, pos, m);
291 pos++;
291 pos++;
292 }
292 }
293
293
294 nomem:
294 nomem:
295 free(a);
295 free(a);
296 free(b);
296 free(b);
297 free(l.base);
297 free(l.base);
298 return rl ? rl : PyErr_NoMemory();
298 return rl ? rl : PyErr_NoMemory();
299 }
299 }
300
300
301 static PyObject *bdiff(PyObject *self, PyObject *args)
301 static PyObject *bdiff(PyObject *self, PyObject *args)
302 {
302 {
303 char *sa, *sb;
303 char *sa, *sb;
304 PyObject *result = NULL;
304 PyObject *result = NULL;
305 struct line *al, *bl;
305 struct line *al, *bl;
306 struct hunklist l = {NULL, NULL};
306 struct hunklist l = {NULL, NULL};
307 struct hunk *h;
307 struct hunk *h;
308 char encode[12], *rb;
308 char encode[12], *rb;
309 int an, bn, len = 0, la, lb;
309 int an, bn, len = 0, la, lb;
310
310
311 if (!PyArg_ParseTuple(args, "s#s#:bdiff", &sa, &la, &sb, &lb))
311 if (!PyArg_ParseTuple(args, "s#s#:bdiff", &sa, &la, &sb, &lb))
312 return NULL;
312 return NULL;
313
313
314 an = splitlines(sa, la, &al);
314 an = splitlines(sa, la, &al);
315 bn = splitlines(sb, lb, &bl);
315 bn = splitlines(sb, lb, &bl);
316 if (!al || !bl)
316 if (!al || !bl)
317 goto nomem;
317 goto nomem;
318
318
319 l = diff(al, an, bl, bn);
319 l = diff(al, an, bl, bn);
320 if (!l.head)
320 if (!l.head)
321 goto nomem;
321 goto nomem;
322
322
323 /* calculate length of output */
323 /* calculate length of output */
324 la = lb = 0;
324 la = lb = 0;
325 for (h = l.base; h != l.head; h++) {
325 for (h = l.base; h != l.head; h++) {
326 if (h->a1 != la || h->b1 != lb)
326 if (h->a1 != la || h->b1 != lb)
327 len += 12 + bl[h->b1].l - bl[lb].l;
327 len += 12 + bl[h->b1].l - bl[lb].l;
328 la = h->a2;
328 la = h->a2;
329 lb = h->b2;
329 lb = h->b2;
330 }
330 }
331
331
332 result = PyString_FromStringAndSize(NULL, len);
332 result = PyString_FromStringAndSize(NULL, len);
333 if (!result)
333 if (!result)
334 goto nomem;
334 goto nomem;
335
335
336 /* build binary patch */
336 /* build binary patch */
337 rb = PyString_AsString(result);
337 rb = PyString_AsString(result);
338 la = lb = 0;
338 la = lb = 0;
339
339
340 for (h = l.base; h != l.head; h++) {
340 for (h = l.base; h != l.head; h++) {
341 if (h->a1 != la || h->b1 != lb) {
341 if (h->a1 != la || h->b1 != lb) {
342 len = bl[h->b1].l - bl[lb].l;
342 len = bl[h->b1].l - bl[lb].l;
343 *(uint32_t *)(encode) = htonl(al[la].l - al->l);
343 *(uint32_t *)(encode) = htonl(al[la].l - al->l);
344 *(uint32_t *)(encode + 4) = htonl(al[h->a1].l - al->l);
344 *(uint32_t *)(encode + 4) = htonl(al[h->a1].l - al->l);
345 *(uint32_t *)(encode + 8) = htonl(len);
345 *(uint32_t *)(encode + 8) = htonl(len);
346 memcpy(rb, encode, 12);
346 memcpy(rb, encode, 12);
347 memcpy(rb + 12, bl[lb].l, len);
347 memcpy(rb + 12, bl[lb].l, len);
348 rb += 12 + len;
348 rb += 12 + len;
349 }
349 }
350 la = h->a2;
350 la = h->a2;
351 lb = h->b2;
351 lb = h->b2;
352 }
352 }
353
353
354 nomem:
354 nomem:
355 free(al);
355 free(al);
356 free(bl);
356 free(bl);
357 free(l.base);
357 free(l.base);
358 return result ? result : PyErr_NoMemory();
358 return result ? result : PyErr_NoMemory();
359 }
359 }
360
360
361 static char mdiff_doc[] = "Efficient binary diff.";
361 static char mdiff_doc[] = "Efficient binary diff.";
362
362
363 static PyMethodDef methods[] = {
363 static PyMethodDef methods[] = {
364 {"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"},
364 {"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"},
365 {"blocks", blocks, METH_VARARGS, "find a list of matching lines\n"},
365 {"blocks", blocks, METH_VARARGS, "find a list of matching lines\n"},
366 {NULL, NULL}
366 {NULL, NULL}
367 };
367 };
368
368
369 PyMODINIT_FUNC initbdiff(void)
369 PyMODINIT_FUNC initbdiff(void)
370 {
370 {
371 Py_InitModule3("bdiff", methods, mdiff_doc);
371 Py_InitModule3("bdiff", methods, mdiff_doc);
372 }
372 }
373
373
@@ -1,246 +1,251 b''
1 # hgweb/server.py - The standalone hg web server.
1 # hgweb/server.py - The standalone hg web server.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
9 import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
10 from mercurial import ui, hg, util, templater
10 from mercurial import ui, hg, util, templater
11 from hgweb_mod import hgweb
11 from hgweb_mod import hgweb
12 from hgwebdir_mod import hgwebdir
12 from hgwebdir_mod import hgwebdir
13 from request import wsgiapplication
13 from request import wsgiapplication
14 from mercurial.i18n import gettext as _
14 from mercurial.i18n import gettext as _
15
15
16 def _splitURI(uri):
16 def _splitURI(uri):
17 """ Return path and query splited from uri
17 """ Return path and query splited from uri
18
18
19 Just like CGI environment, the path is unquoted, the query is
19 Just like CGI environment, the path is unquoted, the query is
20 not.
20 not.
21 """
21 """
22 if '?' in uri:
22 if '?' in uri:
23 path, query = uri.split('?', 1)
23 path, query = uri.split('?', 1)
24 else:
24 else:
25 path, query = uri, ''
25 path, query = uri, ''
26 return urllib.unquote(path), query
26 return urllib.unquote(path), query
27
27
28 class _error_logger(object):
28 class _error_logger(object):
29 def __init__(self, handler):
29 def __init__(self, handler):
30 self.handler = handler
30 self.handler = handler
31 def flush(self):
31 def flush(self):
32 pass
32 pass
33 def write(self, str):
33 def write(self, str):
34 self.writelines(str.split('\n'))
34 self.writelines(str.split('\n'))
35 def writelines(self, seq):
35 def writelines(self, seq):
36 for msg in seq:
36 for msg in seq:
37 self.handler.log_error("HG error: %s", msg)
37 self.handler.log_error("HG error: %s", msg)
38
38
39 class _hgwebhandler(object, BaseHTTPServer.BaseHTTPRequestHandler):
39 class _hgwebhandler(object, BaseHTTPServer.BaseHTTPRequestHandler):
40 def __init__(self, *args, **kargs):
40 def __init__(self, *args, **kargs):
41 self.protocol_version = 'HTTP/1.1'
41 self.protocol_version = 'HTTP/1.1'
42 BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
42 BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
43
43
44 def log_error(self, format, *args):
44 def log_error(self, format, *args):
45 errorlog = self.server.errorlog
45 errorlog = self.server.errorlog
46 errorlog.write("%s - - [%s] %s\n" % (self.address_string(),
46 errorlog.write("%s - - [%s] %s\n" % (self.address_string(),
47 self.log_date_time_string(),
47 self.log_date_time_string(),
48 format % args))
48 format % args))
49
49
50 def log_message(self, format, *args):
50 def log_message(self, format, *args):
51 accesslog = self.server.accesslog
51 accesslog = self.server.accesslog
52 accesslog.write("%s - - [%s] %s\n" % (self.address_string(),
52 accesslog.write("%s - - [%s] %s\n" % (self.address_string(),
53 self.log_date_time_string(),
53 self.log_date_time_string(),
54 format % args))
54 format % args))
55
55
56 def do_POST(self):
56 def do_POST(self):
57 try:
57 try:
58 try:
58 try:
59 self.do_hgweb()
59 self.do_hgweb()
60 except socket.error, inst:
60 except socket.error, inst:
61 if inst[0] != errno.EPIPE:
61 if inst[0] != errno.EPIPE:
62 raise
62 raise
63 except StandardError, inst:
63 except StandardError, inst:
64 self._start_response("500 Internal Server Error", [])
64 self._start_response("500 Internal Server Error", [])
65 self._write("Internal Server Error")
65 self._write("Internal Server Error")
66 tb = "".join(traceback.format_exception(*sys.exc_info()))
66 tb = "".join(traceback.format_exception(*sys.exc_info()))
67 self.log_error("Exception happened during processing request '%s':\n%s",
67 self.log_error("Exception happened during processing request '%s':\n%s",
68 self.path, tb)
68 self.path, tb)
69
69
70 def do_GET(self):
70 def do_GET(self):
71 self.do_POST()
71 self.do_POST()
72
72
73 def do_hgweb(self):
73 def do_hgweb(self):
74 path_info, query = _splitURI(self.path)
74 path_info, query = _splitURI(self.path)
75
75
76 env = {}
76 env = {}
77 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
77 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
78 env['REQUEST_METHOD'] = self.command
78 env['REQUEST_METHOD'] = self.command
79 env['SERVER_NAME'] = self.server.server_name
79 env['SERVER_NAME'] = self.server.server_name
80 env['SERVER_PORT'] = str(self.server.server_port)
80 env['SERVER_PORT'] = str(self.server.server_port)
81 env['REQUEST_URI'] = self.path
81 env['REQUEST_URI'] = self.path
82 env['PATH_INFO'] = path_info
82 env['PATH_INFO'] = path_info
83 if query:
83 if query:
84 env['QUERY_STRING'] = query
84 env['QUERY_STRING'] = query
85 host = self.address_string()
85 host = self.address_string()
86 if host != self.client_address[0]:
86 if host != self.client_address[0]:
87 env['REMOTE_HOST'] = host
87 env['REMOTE_HOST'] = host
88 env['REMOTE_ADDR'] = self.client_address[0]
88 env['REMOTE_ADDR'] = self.client_address[0]
89
89
90 if self.headers.typeheader is None:
90 if self.headers.typeheader is None:
91 env['CONTENT_TYPE'] = self.headers.type
91 env['CONTENT_TYPE'] = self.headers.type
92 else:
92 else:
93 env['CONTENT_TYPE'] = self.headers.typeheader
93 env['CONTENT_TYPE'] = self.headers.typeheader
94 length = self.headers.getheader('content-length')
94 length = self.headers.getheader('content-length')
95 if length:
95 if length:
96 env['CONTENT_LENGTH'] = length
96 env['CONTENT_LENGTH'] = length
97 for header in [h for h in self.headers.keys() \
97 for header in [h for h in self.headers.keys() \
98 if h not in ('content-type', 'content-length')]:
98 if h not in ('content-type', 'content-length')]:
99 hkey = 'HTTP_' + header.replace('-', '_').upper()
99 hkey = 'HTTP_' + header.replace('-', '_').upper()
100 hval = self.headers.getheader(header)
100 hval = self.headers.getheader(header)
101 hval = hval.replace('\n', '').strip()
101 hval = hval.replace('\n', '').strip()
102 if hval:
102 if hval:
103 env[hkey] = hval
103 env[hkey] = hval
104 env['SERVER_PROTOCOL'] = self.request_version
104 env['SERVER_PROTOCOL'] = self.request_version
105 env['wsgi.version'] = (1, 0)
105 env['wsgi.version'] = (1, 0)
106 env['wsgi.url_scheme'] = 'http'
106 env['wsgi.url_scheme'] = 'http'
107 env['wsgi.input'] = self.rfile
107 env['wsgi.input'] = self.rfile
108 env['wsgi.errors'] = _error_logger(self)
108 env['wsgi.errors'] = _error_logger(self)
109 env['wsgi.multithread'] = isinstance(self.server,
109 env['wsgi.multithread'] = isinstance(self.server,
110 SocketServer.ThreadingMixIn)
110 SocketServer.ThreadingMixIn)
111 env['wsgi.multiprocess'] = isinstance(self.server,
111 env['wsgi.multiprocess'] = isinstance(self.server,
112 SocketServer.ForkingMixIn)
112 SocketServer.ForkingMixIn)
113 env['wsgi.run_once'] = 0
113 env['wsgi.run_once'] = 0
114
114
115 self.close_connection = True
115 self.close_connection = True
116 self.saved_status = None
116 self.saved_status = None
117 self.saved_headers = []
117 self.saved_headers = []
118 self.sent_headers = False
118 self.sent_headers = False
119 self.length = None
119 self.length = None
120 req = self.server.reqmaker(env, self._start_response)
120 req = self.server.reqmaker(env, self._start_response)
121 for data in req:
121 for data in req:
122 if data:
122 if data:
123 self._write(data)
123 self._write(data)
124
124
125 def send_headers(self):
125 def send_headers(self):
126 if not self.saved_status:
126 if not self.saved_status:
127 raise AssertionError("Sending headers before start_response() called")
127 raise AssertionError("Sending headers before start_response() called")
128 saved_status = self.saved_status.split(None, 1)
128 saved_status = self.saved_status.split(None, 1)
129 saved_status[0] = int(saved_status[0])
129 saved_status[0] = int(saved_status[0])
130 self.send_response(*saved_status)
130 self.send_response(*saved_status)
131 should_close = True
131 should_close = True
132 for h in self.saved_headers:
132 for h in self.saved_headers:
133 self.send_header(*h)
133 self.send_header(*h)
134 if h[0].lower() == 'content-length':
134 if h[0].lower() == 'content-length':
135 should_close = False
135 should_close = False
136 self.length = int(h[1])
136 self.length = int(h[1])
137 # The value of the Connection header is a list of case-insensitive
137 # The value of the Connection header is a list of case-insensitive
138 # tokens separated by commas and optional whitespace.
138 # tokens separated by commas and optional whitespace.
139 if 'close' in [token.strip().lower() for token in
139 if 'close' in [token.strip().lower() for token in
140 self.headers.get('connection', '').split(',')]:
140 self.headers.get('connection', '').split(',')]:
141 should_close = True
141 should_close = True
142 if should_close:
142 if should_close:
143 self.send_header('Connection', 'close')
143 self.send_header('Connection', 'close')
144 self.close_connection = should_close
144 self.close_connection = should_close
145 self.end_headers()
145 self.end_headers()
146 self.sent_headers = True
146 self.sent_headers = True
147
147
148 def _start_response(self, http_status, headers, exc_info=None):
148 def _start_response(self, http_status, headers, exc_info=None):
149 code, msg = http_status.split(None, 1)
149 code, msg = http_status.split(None, 1)
150 code = int(code)
150 code = int(code)
151 self.saved_status = http_status
151 self.saved_status = http_status
152 bad_headers = ('connection', 'transfer-encoding')
152 bad_headers = ('connection', 'transfer-encoding')
153 self.saved_headers = [ h for h in headers \
153 self.saved_headers = [ h for h in headers \
154 if h[0].lower() not in bad_headers ]
154 if h[0].lower() not in bad_headers ]
155 return self._write
155 return self._write
156
156
157 def _write(self, data):
157 def _write(self, data):
158 if not self.saved_status:
158 if not self.saved_status:
159 raise AssertionError("data written before start_response() called")
159 raise AssertionError("data written before start_response() called")
160 elif not self.sent_headers:
160 elif not self.sent_headers:
161 self.send_headers()
161 self.send_headers()
162 if self.length is not None:
162 if self.length is not None:
163 if len(data) > self.length:
163 if len(data) > self.length:
164 raise AssertionError("Content-length header sent, but more bytes than specified are being written.")
164 raise AssertionError("Content-length header sent, but more bytes than specified are being written.")
165 self.length = self.length - len(data)
165 self.length = self.length - len(data)
166 self.wfile.write(data)
166 self.wfile.write(data)
167 self.wfile.flush()
167 self.wfile.flush()
168
168
169 def create_server(ui, repo):
169 def create_server(ui, repo):
170 use_threads = True
170 use_threads = True
171
171
172 def openlog(opt, default):
172 def openlog(opt, default):
173 if opt and opt != '-':
173 if opt and opt != '-':
174 return open(opt, 'w')
174 return open(opt, 'w')
175 return default
175 return default
176
176
177 address = ui.config("web", "address", "")
177 address = ui.config("web", "address", "")
178 port = int(ui.config("web", "port", 8000))
178 port = int(ui.config("web", "port", 8000))
179 use_ipv6 = ui.configbool("web", "ipv6")
179 use_ipv6 = ui.configbool("web", "ipv6")
180 webdir_conf = ui.config("web", "webdir_conf")
180 webdir_conf = ui.config("web", "webdir_conf")
181 accesslog = openlog(ui.config("web", "accesslog", "-"), sys.stdout)
181 accesslog = openlog(ui.config("web", "accesslog", "-"), sys.stdout)
182 errorlog = openlog(ui.config("web", "errorlog", "-"), sys.stderr)
182 errorlog = openlog(ui.config("web", "errorlog", "-"), sys.stderr)
183
183
184 if use_threads:
184 if use_threads:
185 try:
185 try:
186 from threading import activeCount
186 from threading import activeCount
187 except ImportError:
187 except ImportError:
188 use_threads = False
188 use_threads = False
189
189
190 if use_threads:
190 if use_threads:
191 _mixin = SocketServer.ThreadingMixIn
191 _mixin = SocketServer.ThreadingMixIn
192 else:
192 else:
193 if hasattr(os, "fork"):
193 if hasattr(os, "fork"):
194 _mixin = SocketServer.ForkingMixIn
194 _mixin = SocketServer.ForkingMixIn
195 else:
195 else:
196 class _mixin:
196 class _mixin:
197 pass
197 pass
198
198
199 class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
199 class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
200
201 # SO_REUSEADDR has broken semantics on windows
202 if os.name == 'nt':
203 allow_reuse_address = 0
204
200 def __init__(self, *args, **kargs):
205 def __init__(self, *args, **kargs):
201 BaseHTTPServer.HTTPServer.__init__(self, *args, **kargs)
206 BaseHTTPServer.HTTPServer.__init__(self, *args, **kargs)
202 self.accesslog = accesslog
207 self.accesslog = accesslog
203 self.errorlog = errorlog
208 self.errorlog = errorlog
204 self.repo = repo
209 self.repo = repo
205 self.webdir_conf = webdir_conf
210 self.webdir_conf = webdir_conf
206 self.webdirmaker = hgwebdir
211 self.webdirmaker = hgwebdir
207 self.repoviewmaker = hgweb
212 self.repoviewmaker = hgweb
208 self.reqmaker = wsgiapplication(self.make_handler)
213 self.reqmaker = wsgiapplication(self.make_handler)
209 self.daemon_threads = True
214 self.daemon_threads = True
210
215
211 addr, port = self.socket.getsockname()[:2]
216 addr, port = self.socket.getsockname()[:2]
212 if addr in ('0.0.0.0', '::'):
217 if addr in ('0.0.0.0', '::'):
213 addr = socket.gethostname()
218 addr = socket.gethostname()
214 else:
219 else:
215 try:
220 try:
216 addr = socket.gethostbyaddr(addr)[0]
221 addr = socket.gethostbyaddr(addr)[0]
217 except socket.error:
222 except socket.error:
218 pass
223 pass
219 self.addr, self.port = addr, port
224 self.addr, self.port = addr, port
220
225
221 def make_handler(self):
226 def make_handler(self):
222 if self.webdir_conf:
227 if self.webdir_conf:
223 hgwebobj = self.webdirmaker(self.webdir_conf, ui)
228 hgwebobj = self.webdirmaker(self.webdir_conf, ui)
224 elif self.repo is not None:
229 elif self.repo is not None:
225 hgwebobj = self.repoviewmaker(hg.repository(repo.ui,
230 hgwebobj = self.repoviewmaker(hg.repository(repo.ui,
226 repo.root))
231 repo.root))
227 else:
232 else:
228 raise hg.RepoError(_("There is no Mercurial repository here"
233 raise hg.RepoError(_("There is no Mercurial repository here"
229 " (.hg not found)"))
234 " (.hg not found)"))
230 return hgwebobj
235 return hgwebobj
231
236
232 class IPv6HTTPServer(MercurialHTTPServer):
237 class IPv6HTTPServer(MercurialHTTPServer):
233 address_family = getattr(socket, 'AF_INET6', None)
238 address_family = getattr(socket, 'AF_INET6', None)
234
239
235 def __init__(self, *args, **kwargs):
240 def __init__(self, *args, **kwargs):
236 if self.address_family is None:
241 if self.address_family is None:
237 raise hg.RepoError(_('IPv6 not available on this system'))
242 raise hg.RepoError(_('IPv6 not available on this system'))
238 super(IPv6HTTPServer, self).__init__(*args, **kwargs)
243 super(IPv6HTTPServer, self).__init__(*args, **kwargs)
239
244
240 try:
245 try:
241 if use_ipv6:
246 if use_ipv6:
242 return IPv6HTTPServer((address, port), _hgwebhandler)
247 return IPv6HTTPServer((address, port), _hgwebhandler)
243 else:
248 else:
244 return MercurialHTTPServer((address, port), _hgwebhandler)
249 return MercurialHTTPServer((address, port), _hgwebhandler)
245 except socket.error, inst:
250 except socket.error, inst:
246 raise util.Abort(_('cannot start server: %s') % inst.args[1])
251 raise util.Abort(_('cannot start server: %s') % inst.args[1])
@@ -1,386 +1,393 b''
1 # httprepo.py - HTTP repository proxy classes for mercurial
1 # httprepo.py - HTTP repository proxy classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 from node import *
9 from node import *
10 from remoterepo import *
10 from remoterepo import *
11 from i18n import _
11 from i18n import _
12 import hg, os, urllib, urllib2, urlparse, zlib, util, httplib
12 import hg, os, urllib, urllib2, urlparse, zlib, util, httplib
13 import errno, keepalive, tempfile, socket, changegroup
13 import errno, keepalive, tempfile, socket, changegroup
14
14
15 class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
15 class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
16 def __init__(self, ui):
16 def __init__(self, ui):
17 urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
17 urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
18 self.ui = ui
18 self.ui = ui
19
19
20 def find_user_password(self, realm, authuri):
20 def find_user_password(self, realm, authuri):
21 authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
21 authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
22 self, realm, authuri)
22 self, realm, authuri)
23 user, passwd = authinfo
23 user, passwd = authinfo
24 if user and passwd:
24 if user and passwd:
25 return (user, passwd)
25 return (user, passwd)
26
26
27 if not self.ui.interactive:
27 if not self.ui.interactive:
28 raise util.Abort(_('http authorization required'))
28 raise util.Abort(_('http authorization required'))
29
29
30 self.ui.write(_("http authorization required\n"))
30 self.ui.write(_("http authorization required\n"))
31 self.ui.status(_("realm: %s\n") % realm)
31 self.ui.status(_("realm: %s\n") % realm)
32 if user:
32 if user:
33 self.ui.status(_("user: %s\n") % user)
33 self.ui.status(_("user: %s\n") % user)
34 else:
34 else:
35 user = self.ui.prompt(_("user:"), default=None)
35 user = self.ui.prompt(_("user:"), default=None)
36
36
37 if not passwd:
37 if not passwd:
38 passwd = self.ui.getpass()
38 passwd = self.ui.getpass()
39
39
40 self.add_password(realm, authuri, user, passwd)
40 self.add_password(realm, authuri, user, passwd)
41 return (user, passwd)
41 return (user, passwd)
42
42
43 def netlocsplit(netloc):
43 def netlocsplit(netloc):
44 '''split [user[:passwd]@]host[:port] into 4-tuple.'''
44 '''split [user[:passwd]@]host[:port] into 4-tuple.'''
45
45
46 a = netloc.find('@')
46 a = netloc.find('@')
47 if a == -1:
47 if a == -1:
48 user, passwd = None, None
48 user, passwd = None, None
49 else:
49 else:
50 userpass, netloc = netloc[:a], netloc[a+1:]
50 userpass, netloc = netloc[:a], netloc[a+1:]
51 c = userpass.find(':')
51 c = userpass.find(':')
52 if c == -1:
52 if c == -1:
53 user, passwd = urllib.unquote(userpass), None
53 user, passwd = urllib.unquote(userpass), None
54 else:
54 else:
55 user = urllib.unquote(userpass[:c])
55 user = urllib.unquote(userpass[:c])
56 passwd = urllib.unquote(userpass[c+1:])
56 passwd = urllib.unquote(userpass[c+1:])
57 c = netloc.find(':')
57 c = netloc.find(':')
58 if c == -1:
58 if c == -1:
59 host, port = netloc, None
59 host, port = netloc, None
60 else:
60 else:
61 host, port = netloc[:c], netloc[c+1:]
61 host, port = netloc[:c], netloc[c+1:]
62 return host, port, user, passwd
62 return host, port, user, passwd
63
63
64 def netlocunsplit(host, port, user=None, passwd=None):
64 def netlocunsplit(host, port, user=None, passwd=None):
65 '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
65 '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
66 if port:
66 if port:
67 hostport = host + ':' + port
67 hostport = host + ':' + port
68 else:
68 else:
69 hostport = host
69 hostport = host
70 if user:
70 if user:
71 if passwd:
71 if passwd:
72 userpass = urllib.quote(user) + ':' + urllib.quote(passwd)
72 userpass = urllib.quote(user) + ':' + urllib.quote(passwd)
73 else:
73 else:
74 userpass = urllib.quote(user)
74 userpass = urllib.quote(user)
75 return userpass + '@' + hostport
75 return userpass + '@' + hostport
76 return hostport
76 return hostport
77
77
78 class httpsendfile(file):
78 class httpsendfile(file):
79 def __len__(self):
79 def __len__(self):
80 return os.fstat(self.fileno()).st_size
80 return os.fstat(self.fileno()).st_size
81
81
82 def _gen_sendfile(connection):
82 def _gen_sendfile(connection):
83 def _sendfile(self, data):
83 def _sendfile(self, data):
84 # send a file
84 # send a file
85 if isinstance(data, httpsendfile):
85 if isinstance(data, httpsendfile):
86 # if auth required, some data sent twice, so rewind here
86 # if auth required, some data sent twice, so rewind here
87 data.seek(0)
87 data.seek(0)
88 for chunk in util.filechunkiter(data):
88 for chunk in util.filechunkiter(data):
89 connection.send(self, chunk)
89 connection.send(self, chunk)
90 else:
90 else:
91 connection.send(self, data)
91 connection.send(self, data)
92 return _sendfile
92 return _sendfile
93
93
94 class httpconnection(keepalive.HTTPConnection):
94 class httpconnection(keepalive.HTTPConnection):
95 # must be able to send big bundle as stream.
95 # must be able to send big bundle as stream.
96 send = _gen_sendfile(keepalive.HTTPConnection)
96 send = _gen_sendfile(keepalive.HTTPConnection)
97
97
98 class basehttphandler(keepalive.HTTPHandler):
98 class basehttphandler(keepalive.HTTPHandler):
99 def http_open(self, req):
99 def http_open(self, req):
100 return self.do_open(httpconnection, req)
100 return self.do_open(httpconnection, req)
101
101
102 has_https = hasattr(urllib2, 'HTTPSHandler')
102 has_https = hasattr(urllib2, 'HTTPSHandler')
103 if has_https:
103 if has_https:
104 class httpsconnection(httplib.HTTPSConnection):
104 class httpsconnection(httplib.HTTPSConnection):
105 response_class = keepalive.HTTPResponse
105 response_class = keepalive.HTTPResponse
106 # must be able to send big bundle as stream.
106 # must be able to send big bundle as stream.
107 send = _gen_sendfile(httplib.HTTPSConnection)
107 send = _gen_sendfile(httplib.HTTPSConnection)
108
108
109 class httphandler(basehttphandler, urllib2.HTTPSHandler):
109 class httphandler(basehttphandler, urllib2.HTTPSHandler):
110 def https_open(self, req):
110 def https_open(self, req):
111 return self.do_open(httpsconnection, req)
111 return self.do_open(httpsconnection, req)
112 else:
112 else:
113 class httphandler(basehttphandler):
113 class httphandler(basehttphandler):
114 pass
114 pass
115
115
116 def zgenerator(f):
116 def zgenerator(f):
117 zd = zlib.decompressobj()
117 zd = zlib.decompressobj()
118 try:
118 try:
119 for chunk in util.filechunkiter(f):
119 for chunk in util.filechunkiter(f):
120 yield zd.decompress(chunk)
120 yield zd.decompress(chunk)
121 except httplib.HTTPException, inst:
121 except httplib.HTTPException, inst:
122 raise IOError(None, _('connection ended unexpectedly'))
122 raise IOError(None, _('connection ended unexpectedly'))
123 yield zd.flush()
123 yield zd.flush()
124
124
125 class httprepository(remoterepository):
125 class httprepository(remoterepository):
126 def __init__(self, ui, path):
126 def __init__(self, ui, path):
127 self.path = path
127 self.path = path
128 self.caps = None
128 self.caps = None
129 self.handler = None
129 scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
130 scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
130 if query or frag:
131 if query or frag:
131 raise util.Abort(_('unsupported URL component: "%s"') %
132 raise util.Abort(_('unsupported URL component: "%s"') %
132 (query or frag))
133 (query or frag))
133 if not urlpath: urlpath = '/'
134 if not urlpath: urlpath = '/'
134 host, port, user, passwd = netlocsplit(netloc)
135 host, port, user, passwd = netlocsplit(netloc)
135
136
136 # urllib cannot handle URLs with embedded user or passwd
137 # urllib cannot handle URLs with embedded user or passwd
137 self._url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
138 self._url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
138 urlpath, '', ''))
139 urlpath, '', ''))
139 self.ui = ui
140 self.ui = ui
140
141
141 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
142 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
142 # XXX proxyauthinfo = None
143 # XXX proxyauthinfo = None
143 handlers = [httphandler()]
144 self.handler = httphandler()
145 handlers = [self.handler]
144
146
145 if proxyurl:
147 if proxyurl:
146 # proxy can be proper url or host[:port]
148 # proxy can be proper url or host[:port]
147 if not (proxyurl.startswith('http:') or
149 if not (proxyurl.startswith('http:') or
148 proxyurl.startswith('https:')):
150 proxyurl.startswith('https:')):
149 proxyurl = 'http://' + proxyurl + '/'
151 proxyurl = 'http://' + proxyurl + '/'
150 snpqf = urlparse.urlsplit(proxyurl)
152 snpqf = urlparse.urlsplit(proxyurl)
151 proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
153 proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
152 hpup = netlocsplit(proxynetloc)
154 hpup = netlocsplit(proxynetloc)
153
155
154 proxyhost, proxyport, proxyuser, proxypasswd = hpup
156 proxyhost, proxyport, proxyuser, proxypasswd = hpup
155 if not proxyuser:
157 if not proxyuser:
156 proxyuser = ui.config("http_proxy", "user")
158 proxyuser = ui.config("http_proxy", "user")
157 proxypasswd = ui.config("http_proxy", "passwd")
159 proxypasswd = ui.config("http_proxy", "passwd")
158
160
159 # see if we should use a proxy for this url
161 # see if we should use a proxy for this url
160 no_list = [ "localhost", "127.0.0.1" ]
162 no_list = [ "localhost", "127.0.0.1" ]
161 no_list.extend([p.lower() for
163 no_list.extend([p.lower() for
162 p in ui.configlist("http_proxy", "no")])
164 p in ui.configlist("http_proxy", "no")])
163 no_list.extend([p.strip().lower() for
165 no_list.extend([p.strip().lower() for
164 p in os.getenv("no_proxy", '').split(',')
166 p in os.getenv("no_proxy", '').split(',')
165 if p.strip()])
167 if p.strip()])
166 # "http_proxy.always" config is for running tests on localhost
168 # "http_proxy.always" config is for running tests on localhost
167 if (not ui.configbool("http_proxy", "always") and
169 if (not ui.configbool("http_proxy", "always") and
168 host.lower() in no_list):
170 host.lower() in no_list):
169 ui.debug(_('disabling proxy for %s\n') % host)
171 ui.debug(_('disabling proxy for %s\n') % host)
170 else:
172 else:
171 proxyurl = urlparse.urlunsplit((
173 proxyurl = urlparse.urlunsplit((
172 proxyscheme, netlocunsplit(proxyhost, proxyport,
174 proxyscheme, netlocunsplit(proxyhost, proxyport,
173 proxyuser, proxypasswd or ''),
175 proxyuser, proxypasswd or ''),
174 proxypath, proxyquery, proxyfrag))
176 proxypath, proxyquery, proxyfrag))
175 handlers.append(urllib2.ProxyHandler({scheme: proxyurl}))
177 handlers.append(urllib2.ProxyHandler({scheme: proxyurl}))
176 ui.debug(_('proxying through http://%s:%s\n') %
178 ui.debug(_('proxying through http://%s:%s\n') %
177 (proxyhost, proxyport))
179 (proxyhost, proxyport))
178
180
179 # urllib2 takes proxy values from the environment and those
181 # urllib2 takes proxy values from the environment and those
180 # will take precedence if found, so drop them
182 # will take precedence if found, so drop them
181 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
183 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
182 try:
184 try:
183 if os.environ.has_key(env):
185 if os.environ.has_key(env):
184 del os.environ[env]
186 del os.environ[env]
185 except OSError:
187 except OSError:
186 pass
188 pass
187
189
188 passmgr = passwordmgr(ui)
190 passmgr = passwordmgr(ui)
189 if user:
191 if user:
190 ui.debug(_('http auth: user %s, password %s\n') %
192 ui.debug(_('http auth: user %s, password %s\n') %
191 (user, passwd and '*' * len(passwd) or 'not set'))
193 (user, passwd and '*' * len(passwd) or 'not set'))
192 passmgr.add_password(None, host, user, passwd or '')
194 passmgr.add_password(None, host, user, passwd or '')
193
195
194 handlers.extend((urllib2.HTTPBasicAuthHandler(passmgr),
196 handlers.extend((urllib2.HTTPBasicAuthHandler(passmgr),
195 urllib2.HTTPDigestAuthHandler(passmgr)))
197 urllib2.HTTPDigestAuthHandler(passmgr)))
196 opener = urllib2.build_opener(*handlers)
198 opener = urllib2.build_opener(*handlers)
197
199
198 # 1.0 here is the _protocol_ version
200 # 1.0 here is the _protocol_ version
199 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
201 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
200 urllib2.install_opener(opener)
202 urllib2.install_opener(opener)
203
204 def __del__(self):
205 if self.handler:
206 self.handler.close_all()
207 self.handler = None
201
208
202 def url(self):
209 def url(self):
203 return self.path
210 return self.path
204
211
205 # look up capabilities only when needed
212 # look up capabilities only when needed
206
213
207 def get_caps(self):
214 def get_caps(self):
208 if self.caps is None:
215 if self.caps is None:
209 try:
216 try:
210 self.caps = self.do_read('capabilities').split()
217 self.caps = self.do_read('capabilities').split()
211 except hg.RepoError:
218 except hg.RepoError:
212 self.caps = ()
219 self.caps = ()
213 self.ui.debug(_('capabilities: %s\n') %
220 self.ui.debug(_('capabilities: %s\n') %
214 (' '.join(self.caps or ['none'])))
221 (' '.join(self.caps or ['none'])))
215 return self.caps
222 return self.caps
216
223
217 capabilities = property(get_caps)
224 capabilities = property(get_caps)
218
225
219 def lock(self):
226 def lock(self):
220 raise util.Abort(_('operation not supported over http'))
227 raise util.Abort(_('operation not supported over http'))
221
228
def do_cmd(self, cmd, **args):
    """Send wire command `cmd` to the server and return the open response.

    Keyword args become query-string parameters, except the reserved
    'data' (request body, e.g. a bundle file) and 'headers' (extra HTTP
    headers).  Raises util.Abort on auth failure, IOError on HTTP-level
    errors, and hg.RepoError when the response is not from an hg server
    or uses an unknown protocol version.
    """
    # pop the reserved kwargs before building the query string
    data = args.pop('data', None)
    headers = args.pop('headers', {})
    self.ui.debug(_("sending %s command\n") % cmd)
    q = {"cmd": cmd}
    q.update(args)
    qs = '?%s' % urllib.urlencode(q)
    cu = "%s%s" % (self._url, qs)
    try:
        if data:
            self.ui.debug(_("sending %s bytes\n") %
                          headers.get('content-length', 'X'))
        resp = urllib2.urlopen(urllib2.Request(cu, data, headers))
    except urllib2.HTTPError, inst:
        if inst.code == 401:
            raise util.Abort(_('authorization failed'))
        raise
    except httplib.HTTPException, inst:
        self.ui.debug(_('http error while sending %s command\n') % cmd)
        self.ui.print_exc()
        raise IOError(None, inst)
    except IndexError:
        # this only happens with Python 2.3, later versions raise URLError
        raise util.Abort(_('http error, possibly caused by proxy setting'))
    # record the url we got redirected to (minus our own query string)
    resp_url = resp.geturl()
    if resp_url.endswith(qs):
        resp_url = resp_url[:-len(qs)]
    if self._url != resp_url:
        self.ui.status(_('real URL is %s\n') % resp_url)
        self._url = resp_url
    # response objects differ in how they expose headers depending on
    # the urllib2 handler in use; try both spellings
    try:
        proto = resp.getheader('content-type')
    except AttributeError:
        proto = resp.headers['content-type']

    # accept old "text/plain" and "application/hg-changegroup" for now
    if not proto.startswith('application/mercurial-') and \
       not proto.startswith('text/plain') and \
       not proto.startswith('application/hg-changegroup'):
        raise hg.RepoError(_("'%s' does not appear to be an hg repository") %
                           self._url)

    # validate the protocol version carried in the content-type suffix
    if proto.startswith('application/mercurial-'):
        try:
            version = float(proto[22:])
        except ValueError:
            raise hg.RepoError(_("'%s' sent a broken Content-type "
                                 "header (%s)") % (self._url, proto))
        if version > 0.1:
            raise hg.RepoError(_("'%s' uses newer protocol %s") %
                               (self._url, version))

    return resp
276
283
def do_read(self, cmd, **args):
    """Run wire command `cmd` and return the whole response body."""
    resp = self.do_cmd(cmd, **args)
    try:
        return resp.read()
    finally:
        # close (rather than abandon) the response so a keepalive
        # handler can reuse the underlying connection
        resp.close()
284
291
def lookup(self, key):
    """Resolve `key` to a binary node id via the 'lookup' command.

    The server replies "<flag> <payload>\\n"; flag 1 means success and
    the payload is a hex node, otherwise the payload is an error text.
    """
    response = self.do_cmd("lookup", key=key).read()
    success, data = response[:-1].split(' ', 1)
    if not int(success):
        raise hg.RepoError(data)
    return bin(data)
291
298
def heads(self):
    """Return the repository heads as a list of binary node ids.

    The server sends a newline-terminated, space-separated list of hex
    nodes; anything else is reported as unexpected output.
    """
    d = self.do_read("heads")
    try:
        return map(bin, d[:-1].split(" "))
    except (KeyboardInterrupt, util.SignalInterrupt):
        # a bare except used to swallow interrupts here; re-raise them,
        # matching the pattern used elsewhere (e.g. _readbranchcache)
        raise
    except Exception:
        raise util.UnexpectedOutput(_("unexpected response:"), d)
298
305
def branches(self, nodes):
    """Ask the server for branch info on `nodes` (binary ids).

    Returns a list of tuples of binary nodes, one tuple per input line
    of the server's response.
    """
    n = " ".join(map(hex, nodes))
    d = self.do_read("branches", nodes=n)
    try:
        br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
        return br
    except (KeyboardInterrupt, util.SignalInterrupt):
        # was a bare except: never swallow user interrupts
        raise
    except Exception:
        raise util.UnexpectedOutput(_("unexpected response:"), d)
307
314
def between(self, pairs):
    """Run the 'between' wire command for a list of (top, bottom) pairs.

    Each pair is sent as "hex-hex"; each response line is parsed back
    into a (possibly empty) list of binary nodes.
    """
    n = "\n".join(["-".join(map(hex, p)) for p in pairs])
    d = self.do_read("between", pairs=n)
    try:
        p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
        return p
    except (KeyboardInterrupt, util.SignalInterrupt):
        # was a bare except: never swallow user interrupts
        raise
    except Exception:
        raise util.UnexpectedOutput(_("unexpected response:"), d)
316
323
def changegroup(self, nodes, kind):
    """Stream a changegroup rooted at `nodes`.

    `kind` is accepted for interface compatibility but not used by the
    http transport.  Returns a chunkbuffer over the decompressed stream.
    """
    roots = " ".join(map(hex, nodes))
    f = self.do_cmd("changegroup", roots=roots)
    return util.chunkbuffer(zgenerator(f))
321
328
def changegroupsubset(self, bases, heads, source):
    """Stream a changegroup limited to bases..heads.

    `source` is accepted for interface compatibility but not used by
    the http transport.
    """
    f = self.do_cmd("changegroupsubset",
                    bases=" ".join([hex(n) for n in bases]),
                    heads=" ".join([hex(n) for n in heads]))
    return util.chunkbuffer(zgenerator(f))
327
334
def unbundle(self, cg, heads, source):
    """Push changegroup `cg` to the server via the 'unbundle' command.

    Returns the integer status the server printed on the first line of
    its response; the rest of the response is echoed to the ui.
    """
    # have to stream bundle to a temp file because we do not have
    # http 1.1 chunked transfer.

    type = ""
    types = self.capable('unbundle')
    # servers older than d1b16a746db6 will send 'unbundle' as a
    # boolean capability
    try:
        types = types.split(',')
    except AttributeError:
        types = [""]
    # pick the first advertised bundle type we know how to write
    if types:
        for x in types:
            if x in changegroup.bundletypes:
                type = x
                break

    tempname = changegroup.writebundle(cg, None, type)
    fp = httpsendfile(tempname, "rb")
    try:
        try:
            rfp = self.do_cmd(
                'unbundle', data=fp,
                headers={'content-type': 'application/octet-stream'},
                heads=' '.join(map(hex, heads)))
            try:
                # first line of the reply is the push status code
                ret = int(rfp.readline())
                self.ui.write(rfp.read())
                return ret
            finally:
                rfp.close()
        except socket.error, err:
            if err[0] in (errno.ECONNRESET, errno.EPIPE):
                raise util.Abort(_('push failed: %s') % err[1])
            raise util.Abort(err[1])
    finally:
        # always remove the temporary bundle file
        fp.close()
        os.unlink(tempname)
367
374
def stream_out(self):
    """Start a 'stream_out' request and return the raw response object."""
    return self.do_cmd('stream_out')
370
377
class httpsrepository(httprepository):
    # Same wire protocol as httprepository, but over SSL; refuses to
    # start when this Python lacks HTTPS support.
    def __init__(self, ui, path):
        if not has_https:
            raise util.Abort(_('Python support for SSL and HTTPS '
                               'is not installed'))
        httprepository.__init__(self, ui, path)
377
384
def instance(ui, path, create):
    """Repository factory for http(s) URLs; creating repos is unsupported."""
    if create:
        raise util.Abort(_('cannot create new http repository'))
    if path.startswith('hg:'):
        # rewrite the legacy hg:// scheme to http://
        ui.warn(_("hg:// syntax is deprecated, please use http:// instead\n"))
        path = 'http:' + path[3:]
    if path.startswith('https:'):
        cls = httpsrepository
    else:
        cls = httprepository
    return cls(ui, path)
@@ -1,1920 +1,1922 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, appendfile, changegroup
10 import repo, appendfile, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
def __del__(self):
    # release the reference to any in-progress transaction handle when
    # the repository object is collected
    self.transhandle = None
def __init__(self, parentui, path=None, create=0):
    """Open (or, with create=1, initialize) the repository at `path`.

    When `path` is empty, walk up from the current directory until a
    '.hg' directory is found.  Raises repo.RepoError when no repository
    exists (or one already exists and create was requested), or when
    the repository declares a requirement this version cannot handle.
    """
    repo.repository.__init__(self)
    if not path:
        # search upward for a directory containing '.hg'
        p = os.getcwd()
        while not os.path.isdir(os.path.join(p, ".hg")):
            oldp = p
            p = os.path.dirname(p)
            if p == oldp:
                raise repo.RepoError(_("There is no Mercurial repository"
                                       " here (.hg not found)"))
        path = p

    self.path = os.path.join(path, ".hg")
    self.root = os.path.realpath(path)
    self.origroot = path
    self.opener = util.opener(self.path)    # opener rooted at .hg
    self.wopener = util.opener(self.root)   # opener rooted at the working dir

    if not os.path.isdir(self.path):
        if create:
            # initialize a fresh repository with the modern layout
            if not os.path.exists(path):
                os.mkdir(path)
            os.mkdir(self.path)
            os.mkdir(os.path.join(self.path, "store"))
            requirements = ("revlogv1", "store")
            reqfile = self.opener("requires", "w")
            for r in requirements:
                reqfile.write("%s\n" % r)
            reqfile.close()
            # create an invalid changelog
            self.opener("00changelog.i", "a").write(
                '\0\0\0\2' # represents revlogv2
                ' dummy changelog to prevent using the old repo layout'
            )
        else:
            raise repo.RepoError(_("repository %s not found") % path)
    elif create:
        raise repo.RepoError(_("repository %s already exists") % path)
    else:
        # find requirements
        try:
            requirements = self.opener("requires").read().splitlines()
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            # old repos have no 'requires' file
            requirements = []
        # check them
        for r in requirements:
            if r not in self.supported:
                raise repo.RepoError(_("requirement '%s' not supported") % r)

    # setup store: with the 'store' requirement, store files live under
    # .hg/store with encoded filenames; otherwise directly under .hg
    if "store" in requirements:
        self.encodefn = util.encodefilename
        self.decodefn = util.decodefilename
        self.spath = os.path.join(self.path, "store")
    else:
        self.encodefn = lambda x: x
        self.decodefn = lambda x: x
        self.spath = self.path
    self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

    # per-repository ui, layered on the caller's ui plus .hg/hgrc
    self.ui = ui.ui(parentui=parentui)
    try:
        self.ui.readconfig(self.join("hgrc"), self.root)
    except IOError:
        pass

    # revlog format/flags come from the config...
    v = self.ui.configrevlog()
    self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
    self.revlogv1 = self.revlogversion != revlog.REVLOGV0
    fl = v.get('flags', None)
    flags = 0
    if fl != None:
        for x in fl.split():
            flags |= revlog.flagstr(x)
    elif self.revlogv1:
        flags = revlog.REVLOG_DEFAULT_FLAGS

    v = self.revlogversion | flags
    self.manifest = manifest.manifest(self.sopener, v)
    self.changelog = changelog.changelog(self.sopener, v)

    fallback = self.ui.config('ui', 'fallbackencoding')
    if fallback:
        util._fallbackencoding = fallback

    # the changelog might not have the inline index flag
    # on. If the format of the changelog is the same as found in
    # .hgrc, apply any flags found in the .hgrc as well.
    # Otherwise, just version from the changelog
    v = self.changelog.version
    if v == self.revlogversion:
        v |= flags
    self.revlogversion = v

    # lazily-filled caches
    self.tagscache = None
    self.branchcache = None
    self.nodetagscache = None
    self.filterpats = {}
    self.transhandle = None

    # symlink detection: only meaningful on filesystems that support them
    self._link = lambda x: False
    if util.checklink(self.root):
        r = self.root # avoid circular reference in lambda
        self._link = lambda x: util.is_link(os.path.join(r, x))

    self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
129
129
def url(self):
    """Return this repository's URL in 'file:' form."""
    return 'file:%s' % self.root
132
132
def hook(self, name, throw=False, **args):
    """Run all configured hooks matching `name` (from [hooks] config).

    Hooks may be python callables, 'python:mod.func' references, or
    shell commands.  Returns a truthy value if any hook "failed"; with
    throw=True a failure raises util.Abort instead.
    """
    def callhook(hname, funcname):
        '''call python hook. hook is callable object, looked up as
        name in python module. if callable returns "true", hook
        fails, else passes. if hook raises exception, treated as
        hook failure. exception propagates if throw is "true".

        reason for "true" meaning "hook failed" is so that
        unmodified commands (e.g. mercurial.commands.update) can
        be run as hooks without wrappers to convert return values.'''

        self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
        obj = funcname
        if not callable(obj):
            # resolve a dotted "module.attr..." path to a callable
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in '
                                   'a module)') % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
        if not callable(obj):
            raise util.Abort(_('%s hook is invalid '
                               '("%s" is not callable)') %
                             (hname, funcname))
        try:
            r = obj(ui=self.ui, repo=self, hooktype=name, **args)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, exc:
            if isinstance(exc, util.Abort):
                self.ui.warn(_('error: %s hook failed: %s\n') %
                             (hname, exc.args[0]))
            else:
                self.ui.warn(_('error: %s hook raised an exception: '
                               '%s\n') % (hname, exc))
            if throw:
                raise
            self.ui.print_exc()
            return True
        if r:
            if throw:
                raise util.Abort(_('%s hook failed') % hname)
            self.ui.warn(_('warning: %s hook failed\n') % hname)
        return r

    def runhook(name, cmd):
        # run a shell-command hook; hook args are exported as HG_* env vars
        self.ui.note(_("running hook %s: %s\n") % (name, cmd))
        env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
        r = util.system(cmd, environ=env, cwd=self.root)
        if r:
            desc, r = util.explain_exit(r)
            if throw:
                raise util.Abort(_('%s hook %s') % (name, desc))
            self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
        return r

    r = False
    # collect hooks whose config key is "name" or "name.suffix", run in
    # sorted key order; dispatch by value type
    hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
             if hname.split(".", 1)[0] == name and cmd]
    hooks.sort()
    for hname, cmd in hooks:
        if callable(cmd):
            r = callhook(hname, cmd) or r
        elif cmd.startswith('python:'):
            r = callhook(hname, cmd[7:].strip()) or r
        else:
            r = runhook(hname, cmd) or r
    return r
217
217
# characters that may never appear in a tag name (checked in _tag)
tag_disallowed = ':\r\n'
219
219
def _tag(self, name, node, message, local, user, date, parent=None):
    """Record tag `name` for `node`, either locally or via a commit.

    With parent=None the tag is written relative to the dirstate
    parent; otherwise `.hgtags` content is taken from `parent`.  Fires
    the 'pretag' hook (which may veto) and the 'tag' hook.  Returns the
    node of the tagging commit, or None for local tags.
    """
    use_dirstate = parent is None

    for c in self.tag_disallowed:
        if c in name:
            raise util.Abort(_('%r cannot be used in a tag name') % c)

    self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

    if local:
        # local tags are stored in the current charset
        self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
        self.hook('tag', node=hex(node), tag=name, local=local)
        return

    # committed tags are stored in UTF-8
    line = '%s %s\n' % (hex(node), util.fromlocal(name))
    if use_dirstate:
        self.wfile('.hgtags', 'ab').write(line)
    else:
        # NOTE(review): this appends parent's full .hgtags content plus
        # the new line to the working copy file opened in append mode --
        # confirm the working copy is expected to be empty/absent here
        ntags = self.filectx('.hgtags', parent).data()
        self.wfile('.hgtags', 'ab').write(ntags + line)
    if use_dirstate and self.dirstate.state('.hgtags') == '?':
        # make sure .hgtags is tracked before committing it
        self.add(['.hgtags'])

    tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)

    self.hook('tag', node=hex(node), tag=name, local=local)

    return tagnode
250
250
def tag(self, name, node, message, local, user, date):
    '''tag a revision with a symbolic name.

    if local is True, the tag is stored in a per-repository file.
    otherwise, it is stored in the .hgtags file, and a new
    changeset is committed with the change.

    keyword arguments:

    local: whether to store tag in non-version-controlled file
    (default False)

    message: commit message to use if committing

    user: name of user to use if committing

    date: date tuple to use if committing'''

    # refuse to run while .hgtags shows up in any of the first five
    # status lists (modified/added/removed/deleted/unknown)
    dirty = [s for s in self.status()[:5] if '.hgtags' in s]
    if dirty:
        raise util.Abort(_('working copy of .hgtags is changed '
                           '(please commit .hgtags manually)'))

    self._tag(name, node, message, local, user, date)
276
276
def tags(self):
    '''return a mapping of tag to node'''
    if not self.tagscache:
        self.tagscache = {}

        def parsetag(line, context):
            # parse one "<hex node> <tag name>" entry; malformed or
            # dangling entries are warned about and skipped.
            # fix: the body used the enclosing loop variable 'l' via
            # closure instead of its own 'line' parameter -- it only
            # worked because every caller happened to pass 'l'
            if not line:
                return
            s = line.split(" ", 1)
            if len(s) != 2:
                self.ui.warn(_("%s: cannot parse entry\n") % context)
                return
            node, key = s
            key = util.tolocal(key.strip()) # stored in UTF-8
            try:
                bin_n = bin(node)
            except TypeError:
                self.ui.warn(_("%s: node '%s' is not well formed\n") %
                             (context, node))
                return
            if bin_n not in self.changelog.nodemap:
                self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
                             (context, key))
                return
            self.tagscache[key] = bin_n

        # read the tags file from each head, ending with the tip,
        # and add each tag found to the map, with "newer" ones
        # taking precedence
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            count = 0
            for l in f.data().splitlines():
                count += 1
                parsetag(l, _("%s, line %d") % (str(f), count))

        try:
            f = self.opener("localtags")
            count = 0
            for l in f:
                # localtags are stored in the local character set
                # while the internal tag table is stored in UTF-8
                l = util.fromlocal(l)
                count += 1
                parsetag(l, _("localtags, line %d") % count)
        except IOError:
            pass

        # 'tip' always refers to the current changelog tip
        self.tagscache['tip'] = self.changelog.tip()

    return self.tagscache
330
330
def _hgtagsnodes(self):
    """Yield (rev, node, .hgtags filenode) for heads that have .hgtags.

    Heads without a .hgtags file are skipped.  When several heads share
    the same .hgtags filenode, only the last-seen occurrence is kept,
    so each distinct .hgtags version is read once.
    """
    heads = self.heads()
    heads.reverse()
    last = {}   # fnode -> index of its most recent entry in ret
    ret = []
    for node in heads:
        c = self.changectx(node)
        rev = c.rev()
        try:
            fnode = c.filenode('.hgtags')
        except revlog.LookupError:
            # this head has no .hgtags file
            continue
        ret.append((rev, node, fnode))
        if fnode in last:
            # drop the earlier duplicate of this .hgtags version
            ret[last[fnode]] = None
        last[fnode] = len(ret) - 1
    return [item for item in ret if item]
348
348
def tagslist(self):
    '''return a list of tags ordered by revision'''
    l = []
    for t, n in self.tags().items():
        try:
            r = self.changelog.rev(n)
        except (KeyboardInterrupt, util.SignalInterrupt):
            # was a bare except: never swallow user interrupts
            raise
        except Exception:
            r = -2 # sort to the beginning of the list if unknown
        l.append((r, t, n))
    l.sort()
    return [(t, n) for r, t, n in l]
360
360
def nodetags(self, node):
    '''return the tags associated with a node'''
    if not self.nodetagscache:
        # build the inverse of tags(): node -> [tag, ...]
        self.nodetagscache = {}
        for tag, n in self.tags().items():
            self.nodetagscache.setdefault(n, []).append(tag)
    return self.nodetagscache.get(node, [])
368
368
def _branchtags(self):
    """Return {branch name: tip node}, refreshing the on-disk cache.

    Reads the saved branch cache, and if it does not cover the current
    changelog tip, scans the missing revisions and writes the cache
    back out.
    """
    partial, last, lrev = self._readbranchcache()

    tiprev = self.changelog.count() - 1
    if lrev != tiprev:
        # cache is stale: fold in revisions lrev+1..tiprev and persist
        self._updatebranchcache(partial, lrev+1, tiprev+1)
        self._writebranchcache(partial, self.changelog.tip(), tiprev)

    return partial
378
378
def branchtags(self):
    """Return the cached {branch name: tip node} map, building it once."""
    if self.branchcache is not None:
        return self.branchcache

    self.branchcache = {} # avoid recursion in changectx
    # the branch cache is stored on disk as UTF-8, but in the local
    # charset internally
    for name, node in self._branchtags().items():
        self.branchcache[util.tolocal(name)] = node
    return self.branchcache
391
391
def _readbranchcache(self):
    """Read the branch cache from .hg/branches.cache.

    Returns (partial, last, lrev): partial maps branch label ->
    head node, last/lrev are the tip node/revision the cache was
    written against.  Any failure yields an empty cache.
    """
    partial = {}
    try:
        f = self.opener("branches.cache")
        lines = f.read().split('\n')
        f.close()
        # first line is "<hex tip node> <tip rev>"
        last, lrev = lines.pop(0).rstrip().split(" ", 1)
        last, lrev = bin(last), int(lrev)
        if not (lrev < self.changelog.count() and
                self.changelog.node(lrev) == last): # sanity check
            # invalidate the cache
            raise ValueError('Invalid branch cache: unknown tip')
        # remaining lines are "<hex node> <branch label>"
        for l in lines:
            if not l: continue
            node, label = l.rstrip().split(" ", 1)
            partial[label] = bin(node)
    except (KeyboardInterrupt, util.SignalInterrupt):
        # never swallow user interrupts
        raise
    except Exception, inst:
        # any other problem (missing or corrupt file) just means the
        # cache is unusable; fall back to "no cache"
        if self.ui.debugflag:
            self.ui.warn(str(inst), '\n')
        partial, last, lrev = {}, nullid, nullrev
    return partial, last, lrev
415
415
def _writebranchcache(self, branches, tip, tiprev):
    """Write the branch head map to .hg/branches.cache.

    Format: a "<hex tip> <tiprev>" header line followed by one
    "<hex node> <label>" line per branch.  IOErrors are ignored;
    the cache is only an optimization.
    """
    try:
        f = self.opener("branches.cache", "w")
        f.write("%s %s\n" % (hex(tip), tiprev))
        for label, node in branches.iteritems():
            f.write("%s %s\n" % (hex(node), label))
    except IOError:
        pass
424
424
def _updatebranchcache(self, partial, start, end):
    """Fold revisions [start, end) into the branch -> head map.

    Later revisions overwrite earlier ones, so each branch ends up
    mapped to its newest scanned changeset.
    """
    for r in xrange(start, end):
        c = self.changectx(r)
        b = c.branch()
        if b:
            partial[b] = c.node()
431
431
def lookup(self, key):
    """Resolve key to a changelog node.

    Resolution order: '.' (working dir's first parent), 'null',
    exact changelog match, tag name, branch name, then an
    unambiguous node-hex prefix.  Raises RepoError if nothing
    matches.
    """
    if key == '.':
        key = self.dirstate.parents()[0]
        if key == nullid:
            raise repo.RepoError(_("no revision checked out"))
    elif key == 'null':
        return nullid
    n = self.changelog._match(key)
    if n:
        return n
    if key in self.tags():
        return self.tags()[key]
    if key in self.branchtags():
        return self.branchtags()[key]
    # last resort: try key as a prefix of a full node hex
    n = self.changelog._partialmatch(key)
    if n:
        return n
    raise repo.RepoError(_("unknown revision '%s'") % key)
450
450
def dev(self):
    """Return the device number (st_dev) of the repository path."""
    st = os.lstat(self.path)
    return st.st_dev
453
453
def local(self):
    """True: this repository lives on local disk (not remote)."""
    return True
456
456
def join(self, f):
    """Return the full path of f under self.path."""
    base = self.path
    return os.path.join(base, f)
459
459
def sjoin(self, f):
    """Return the full path of f under the store path, after
    applying the store filename encoding."""
    encoded = self.encodefn(f)
    return os.path.join(self.spath, encoded)
463
463
def wjoin(self, f):
    """Return the full path of f under the working directory root."""
    base = self.root
    return os.path.join(base, f)
466
466
def file(self, f):
    """Return the filelog for the tracked file f."""
    if f[0] == '/':
        # manifest paths carry no leading slash; strip it
        f = f[1:]
    return filelog.filelog(self.sopener, f, self.revlogversion)
471
471
def changectx(self, changeid=None):
    """Return a change context object for the given changeid."""
    return context.changectx(self, changeid)
474
474
def workingctx(self):
    """Return a context object for the working directory."""
    return context.workingctx(self)
477
477
def parents(self, changeid=None):
    '''
    get list of changectxs for parents of changeid or working directory
    '''
    if changeid is None:
        pl = self.dirstate.parents()
    else:
        pl = self.changelog.parents(self.changelog.lookup(changeid))
    # a nullid second parent means this is not a merge
    if pl[1] != nullid:
        return [self.changectx(pl[0]), self.changectx(pl[1])]
    return [self.changectx(pl[0])]
490
490
def filectx(self, path, changeid=None, fileid=None):
    """Return a file context for path.

    changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    return context.filectx(self, path, changeid, fileid)
495
495
def getcwd(self):
    """Return the current working directory as seen by the dirstate."""
    ds = self.dirstate
    return ds.getcwd()
498
498
def wfile(self, f, mode='r'):
    """Open working-directory file f with the given mode."""
    opener = self.wopener
    return opener(f, mode)
501
501
def _filter(self, filter, filename, data):
    """Run data through the first filter command whose pattern matches
    filename, looked up in the given config section (e.g. "encode" or
    "decode").  Returns the (possibly transformed) data."""
    # lazily compile and cache the (matcher, command) pairs
    if filter not in self.filterpats:
        pairs = []
        for pat, cmd in self.ui.configitems(filter):
            mf = util.matcher(self.root, "", [pat], [], [])[1]
            pairs.append((mf, cmd))
        self.filterpats[filter] = pairs

    for mf, cmd in self.filterpats[filter]:
        if mf(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = util.filter(data, cmd)
            break

    return data
517
517
def wread(self, filename):
    """Read filename from the working directory and run it through
    the "encode" filters."""
    if not self._link(filename):
        raw = self.wopener(filename, 'r').read()
    else:
        # for a symlink, the content is the link target itself
        raw = os.readlink(self.wjoin(filename))
    return self._filter("encode", filename, raw)
524
524
def wwrite(self, filename, data, flags):
    """Write data to filename in the working directory after running
    the "decode" filters.

    flags: "l" means write a symlink whose target is data;
    "x" means set the executable bit on the written file.
    """
    data = self._filter("decode", filename, data)
    if "l" in flags:
        # remove any existing file first; os.symlink fails on an
        # existing path
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        os.symlink(data, self.wjoin(filename))
    else:
        try:
            # replace a stale symlink with a regular file
            if self._link(filename):
                os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_exec(self.wjoin(filename), "x" in flags)
541
541
def wwritedata(self, filename, data):
    """Run data through the "decode" filters for filename without
    touching the disk; return the transformed data."""
    return self._filter("decode", filename, data)
544
544
def transaction(self):
    """Return a transaction handle, nesting inside a running one.

    Before opening a fresh transaction, the current dirstate is
    saved to journal.dirstate; on successful close the journal
    files are renamed to undo files so rollback() can restore them.
    """
    tr = self.transhandle
    if tr != None and tr.running():
        # reuse the active transaction
        return tr.nest()

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)

    renames = [(self.sjoin("journal"), self.sjoin("undo")),
               (self.join("journal.dirstate"), self.join("undo.dirstate"))]
    tr = transaction.transaction(self.ui.warn, self.sopener,
                                 self.sjoin("journal"),
                                 aftertrans(renames))
    self.transhandle = tr
    return tr
564
564
def recover(self):
    """Roll back an interrupted transaction, if one left a journal.

    Returns True if a journal was found and rolled back, False
    otherwise.
    """
    l = self.lock()
    if os.path.exists(self.sjoin("journal")):
        self.ui.status(_("rolling back interrupted transaction\n"))
        transaction.rollback(self.sopener, self.sjoin("journal"))
        # drop in-memory state invalidated by the rollback
        self.reload()
        return True
    else:
        self.ui.warn(_("no interrupted transaction available\n"))
        return False
575
575
def rollback(self, wlock=None):
    """Undo the last committed transaction, restoring the saved
    pre-transaction dirstate from undo.dirstate."""
    if not wlock:
        wlock = self.wlock()
    l = self.lock()
    if os.path.exists(self.sjoin("undo")):
        self.ui.status(_("rolling back last transaction\n"))
        transaction.rollback(self.sopener, self.sjoin("undo"))
        util.rename(self.join("undo.dirstate"), self.join("dirstate"))
        # refresh both store and working-dir in-memory state
        self.reload()
        self.wreload()
    else:
        self.ui.warn(_("no rollback information available\n"))
588
588
def wreload(self):
    """Re-read the dirstate from disk."""
    self.dirstate.read()
591
591
def reload(self):
    """Reload changelog and manifest from disk, and drop the tag
    caches that were derived from them."""
    self.changelog.load()
    self.manifest.load()
    self.tagscache = None
    self.nodetagscache = None
597
597
def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
            desc=None):
    """Acquire the lock file lockname.

    Tries a non-blocking acquire first; if it is held and wait is
    set, warns and retries with the configured ui.timeout (default
    600 seconds), otherwise re-raises LockHeld.  acquirefn, if
    given, runs once the lock is held.  Returns the lock object.
    """
    try:
        l = lock.lock(lockname, 0, releasefn, desc=desc)
    except lock.LockHeld, inst:
        if not wait:
            raise
        self.ui.warn(_("waiting for lock on %s held by %r\n") %
                     (desc, inst.locker))
        # default to 600 seconds timeout
        l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                      releasefn, desc=desc)
    if acquirefn:
        acquirefn()
    return l
613
613
def lock(self, wait=1):
    """Acquire the repository store lock; reloads store state on
    acquisition."""
    return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                        desc=_('repository %s') % self.origroot)
617
617
def wlock(self, wait=1):
    """Acquire the working-directory lock; writes the dirstate on
    release and re-reads it on acquisition."""
    return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                        self.wreload,
                        desc=_('working directory of %s') % self.origroot)
622
622
def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
    """
    commit an individual file as part of a larger transaction

    Returns the filelog node for fn (the existing one if the file is
    unchanged).  Appends fn to changelist only when a new filelog
    revision is actually added.
    """

    t = self.wread(fn)
    fl = self.file(fn)
    fp1 = manifest1.get(fn, nullid)  # file node in first parent
    fp2 = manifest2.get(fn, nullid)  # file node in second parent

    meta = {}
    cp = self.dirstate.copied(fn)
    if cp:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #
        meta["copy"] = cp
        if not manifest2: # not a branch merge
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
            fp2 = nullid
        elif fp2 != nullid: # copied on remote side
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
        elif fp1 != nullid: # copied on local side, reversed
            meta["copyrev"] = hex(manifest2.get(cp))
            fp2 = fp1
        else: # directory rename
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
        self.ui.debug(_(" %s: copy %s:%s\n") %
                      (fn, cp, meta["copyrev"]))
        fp1 = nullid
    elif fp2 != nullid:
        # is one parent an ancestor of the other?
        fpa = fl.ancestor(fp1, fp2)
        if fpa == fp1:
            fp1, fp2 = fp2, nullid
        elif fpa == fp2:
            fp2 = nullid

    # is the file unmodified from the parent? report existing entry
    if fp2 == nullid and not fl.cmp(fp1, t):
        return fp1

    changelist.append(fn)
    return fl.add(t, meta, transaction, linkrev, fp1, fp2)
682
682
def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
    """Commit files with explicit parents, bypassing dirstate-based
    change detection (commit() is told p1 so it skips the dirstate).

    NOTE: when p1 is None, BOTH parents are taken from the dirstate
    and any p2 argument is ignored (historical behavior).
    """
    if p1 is None:
        p1, p2 = self.dirstate.parents()
    return self.commit(files=files, text=text, user=user, date=date,
                       p1=p1, p2=p2, wlock=wlock, extra=extra)
688
688
def commit(self, files=None, text="", user=None, date=None,
           match=util.always, force=False, lock=None, wlock=None,
           force_editor=False, p1=None, p2=None, extra={}):
    """Create a new changeset and return its node (None if nothing
    was committed).

    With p1 unset, the commit is driven by the dirstate (the normal
    case); with p1 given (rawcommit), files/parents are taken as-is.
    An editor is launched when no text is supplied or force_editor
    is set.
    """

    commit = []
    remove = []
    changed = []
    use_dirstate = (p1 is None) # not rawcommit
    # copy so the caller's dict (possibly a shared default) is never
    # mutated by the "branch" key added below
    extra = extra.copy()

    # figure out which files to commit and which to remove
    if use_dirstate:
        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            changes = self.status(match=match)[:5]
            modified, added, removed, deleted, unknown = changes
            commit = modified + added
            remove = removed
    else:
        commit = files

    # determine parents and whether the dirstate must be moved
    if use_dirstate:
        p1, p2 = self.dirstate.parents()
        update_dirstate = True
    else:
        p1, p2 = p1, p2 or nullid
        update_dirstate = (self.dirstate.parents()[0] == p1)

    c1 = self.changelog.read(p1)
    c2 = self.changelog.read(p2)
    m1 = self.manifest.read(c1[0]).copy()
    m2 = self.manifest.read(c2[0])

    if use_dirstate:
        branchname = self.workingctx().branch()
        try:
            # round-trip to verify the name really is valid UTF-8
            branchname = branchname.decode('UTF-8').encode('UTF-8')
        except UnicodeDecodeError:
            raise util.Abort(_('branch name not in UTF-8!'))
    else:
        branchname = ""

    if use_dirstate:
        oldname = c1[5].get("branch", "") # stored in UTF-8
        # bail out early when there is truly nothing to record
        if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
            self.ui.status(_("nothing changed\n"))
            return None

    xp1 = hex(p1)
    if p2 == nullid: xp2 = ''
    else: xp2 = hex(p2)

    self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

    if not wlock:
        wlock = self.wlock()
    if not lock:
        lock = self.lock()
    tr = self.transaction()

    # check in files
    new = {}
    linkrev = self.changelog.count()
    commit.sort()
    is_exec = util.execfunc(self.root, m1.execf)
    is_link = util.linkfunc(self.root, m1.linkf)
    for f in commit:
        self.ui.note(f + "\n")
        try:
            new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
            m1.set(f, is_exec(f), is_link(f))
        except (OSError, IOError):
            if use_dirstate:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise
            else:
                # rawcommit: a missing file becomes a removal instead
                remove.append(f)

    # update manifest
    m1.update(new)
    remove.sort()
    removed = []

    for f in remove:
        if f in m1:
            del m1[f]
            removed.append(f)
    mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

    # add changeset
    new = new.keys()
    new.sort()

    user = user or self.ui.username()
    if not text or force_editor:
        # build the commit-message template shown in the editor
        edittext = []
        if text:
            edittext.append(text)
        edittext.append("")
        edittext.append("HG: user: %s" % user)
        if p2 != nullid:
            edittext.append("HG: branch merge")
        if branchname:
            edittext.append("HG: branch %s" % util.tolocal(branchname))
        edittext.extend(["HG: changed %s" % f for f in changed])
        edittext.extend(["HG: removed %s" % f for f in removed])
        if not changed and not remove:
            edittext.append("HG: no files changed")
        edittext.append("")
        # run editor in the repository root
        olddir = os.getcwd()
        os.chdir(self.root)
        text = self.ui.edit("\n".join(edittext), user)
        os.chdir(olddir)

    # normalize the message: strip trailing whitespace and leading
    # blank lines; an empty message aborts the commit
    lines = [line.rstrip() for line in text.rstrip().splitlines()]
    while lines and not lines[0]:
        del lines[0]
    if not lines:
        return None
    text = '\n'.join(lines)
    if branchname:
        extra["branch"] = branchname
    n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                           user, date, extra)
    self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
              parent2=xp2)
    tr.close()

    # keep the in-memory branch head cache current
    if self.branchcache and "branch" in extra:
        self.branchcache[util.tolocal(extra["branch"])] = n

    if use_dirstate or update_dirstate:
        self.dirstate.setparents(n)
        if use_dirstate:
            self.dirstate.update(new, "n")
            self.dirstate.forget(removed)

    self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
    return n
837
837
def walk(self, node=None, files=[], match=util.always, badmatch=None):
    '''
    walk recursively through the directory tree or a given
    changeset, finding all files matched by the match
    function

    results are yielded in a tuple (src, filename), where src
    is one of:
    'f' the file was found in the directory tree
    'm' the file was only in the dirstate and not in the tree
    'b' file was not found and matched badmatch
    '''

    if node:
        # walking a stored changeset: scan its manifest
        fdict = dict.fromkeys(files)
        for fn in self.manifest.read(self.changelog.read(node)[0]):
            for ffn in fdict:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    # the immediate break keeps this del safe while
                    # iterating fdict
                    del fdict[ffn]
                    break
            if match(fn):
                yield 'm', fn
        # anything left in fdict was requested but not in the manifest
        for fn in fdict:
            if badmatch and badmatch(fn):
                if match(fn):
                    yield 'b', fn
            else:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.getcwd(), fn), short(node)))
    else:
        # no node: delegate to the dirstate's working-dir walk
        for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
            yield src, fn
871
871
872 def status(self, node1=None, node2=None, files=[], match=util.always,
872 def status(self, node1=None, node2=None, files=[], match=util.always,
873 wlock=None, list_ignored=False, list_clean=False):
873 wlock=None, list_ignored=False, list_clean=False):
874 """return status of files between two nodes or node and working directory
874 """return status of files between two nodes or node and working directory
875
875
876 If node1 is None, use the first dirstate parent instead.
876 If node1 is None, use the first dirstate parent instead.
877 If node2 is None, compare node1 with working directory.
877 If node2 is None, compare node1 with working directory.
878 """
878 """
879
879
880 def fcmp(fn, mf):
880 def fcmp(fn, mf):
881 t1 = self.wread(fn)
881 t1 = self.wread(fn)
882 return self.file(fn).cmp(mf.get(fn, nullid), t1)
882 return self.file(fn).cmp(mf.get(fn, nullid), t1)
883
883
884 def mfmatches(node):
884 def mfmatches(node):
885 change = self.changelog.read(node)
885 change = self.changelog.read(node)
886 mf = self.manifest.read(change[0]).copy()
886 mf = self.manifest.read(change[0]).copy()
887 for fn in mf.keys():
887 for fn in mf.keys():
888 if not match(fn):
888 if not match(fn):
889 del mf[fn]
889 del mf[fn]
890 return mf
890 return mf
891
891
892 modified, added, removed, deleted, unknown = [], [], [], [], []
892 modified, added, removed, deleted, unknown = [], [], [], [], []
893 ignored, clean = [], []
893 ignored, clean = [], []
894
894
895 compareworking = False
895 compareworking = False
896 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
896 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
897 compareworking = True
897 compareworking = True
898
898
899 if not compareworking:
899 if not compareworking:
900 # read the manifest from node1 before the manifest from node2,
900 # read the manifest from node1 before the manifest from node2,
901 # so that we'll hit the manifest cache if we're going through
901 # so that we'll hit the manifest cache if we're going through
902 # all the revisions in parent->child order.
902 # all the revisions in parent->child order.
903 mf1 = mfmatches(node1)
903 mf1 = mfmatches(node1)
904
904
905 # are we comparing the working directory?
905 # are we comparing the working directory?
906 if not node2:
906 if not node2:
907 if not wlock:
907 if not wlock:
908 try:
908 try:
909 wlock = self.wlock(wait=0)
909 wlock = self.wlock(wait=0)
910 except lock.LockException:
910 except lock.LockException:
911 wlock = None
911 wlock = None
912 (lookup, modified, added, removed, deleted, unknown,
912 (lookup, modified, added, removed, deleted, unknown,
913 ignored, clean) = self.dirstate.status(files, match,
913 ignored, clean) = self.dirstate.status(files, match,
914 list_ignored, list_clean)
914 list_ignored, list_clean)
915
915
916 # are we comparing working dir against its parent?
916 # are we comparing working dir against its parent?
917 if compareworking:
917 if compareworking:
918 if lookup:
918 if lookup:
919 # do a full compare of any files that might have changed
919 # do a full compare of any files that might have changed
920 mf2 = mfmatches(self.dirstate.parents()[0])
920 mf2 = mfmatches(self.dirstate.parents()[0])
921 for f in lookup:
921 for f in lookup:
922 if fcmp(f, mf2):
922 if fcmp(f, mf2):
923 modified.append(f)
923 modified.append(f)
924 else:
924 else:
925 clean.append(f)
925 clean.append(f)
926 if wlock is not None:
926 if wlock is not None:
927 self.dirstate.update([f], "n")
927 self.dirstate.update([f], "n")
928 else:
928 else:
929 # we are comparing working dir against non-parent
929 # we are comparing working dir against non-parent
930 # generate a pseudo-manifest for the working dir
930 # generate a pseudo-manifest for the working dir
931 # XXX: create it in dirstate.py ?
931 # XXX: create it in dirstate.py ?
932 mf2 = mfmatches(self.dirstate.parents()[0])
932 mf2 = mfmatches(self.dirstate.parents()[0])
933 is_exec = util.execfunc(self.root, mf2.execf)
933 is_exec = util.execfunc(self.root, mf2.execf)
934 is_link = util.linkfunc(self.root, mf2.linkf)
934 is_link = util.linkfunc(self.root, mf2.linkf)
935 for f in lookup + modified + added:
935 for f in lookup + modified + added:
936 mf2[f] = ""
936 mf2[f] = ""
937 mf2.set(f, is_exec(f), is_link(f))
937 mf2.set(f, is_exec(f), is_link(f))
938 for f in removed:
938 for f in removed:
939 if f in mf2:
939 if f in mf2:
940 del mf2[f]
940 del mf2[f]
941 else:
941 else:
942 # we are comparing two revisions
942 # we are comparing two revisions
943 mf2 = mfmatches(node2)
943 mf2 = mfmatches(node2)
944
944
945 if not compareworking:
945 if not compareworking:
946 # flush lists from dirstate before comparing manifests
946 # flush lists from dirstate before comparing manifests
947 modified, added, clean = [], [], []
947 modified, added, clean = [], [], []
948
948
949 # make sure to sort the files so we talk to the disk in a
949 # make sure to sort the files so we talk to the disk in a
950 # reasonable order
950 # reasonable order
951 mf2keys = mf2.keys()
951 mf2keys = mf2.keys()
952 mf2keys.sort()
952 mf2keys.sort()
953 for fn in mf2keys:
953 for fn in mf2keys:
954 if mf1.has_key(fn):
954 if mf1.has_key(fn):
955 if mf1.flags(fn) != mf2.flags(fn) or \
955 if mf1.flags(fn) != mf2.flags(fn) or \
956 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
956 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
957 modified.append(fn)
957 modified.append(fn)
958 elif list_clean:
958 elif list_clean:
959 clean.append(fn)
959 clean.append(fn)
960 del mf1[fn]
960 del mf1[fn]
961 else:
961 else:
962 added.append(fn)
962 added.append(fn)
963
963
964 removed = mf1.keys()
964 removed = mf1.keys()
965
965
966 # sort and return results:
966 # sort and return results:
967 for l in modified, added, removed, deleted, unknown, ignored, clean:
967 for l in modified, added, removed, deleted, unknown, ignored, clean:
968 l.sort()
968 l.sort()
969 return (modified, added, removed, deleted, unknown, ignored, clean)
969 return (modified, added, removed, deleted, unknown, ignored, clean)
970
970
971 def add(self, list, wlock=None):
971 def add(self, list, wlock=None):
972 if not wlock:
972 if not wlock:
973 wlock = self.wlock()
973 wlock = self.wlock()
974 for f in list:
974 for f in list:
975 p = self.wjoin(f)
975 p = self.wjoin(f)
976 islink = os.path.islink(p)
976 islink = os.path.islink(p)
977 if not islink and not os.path.exists(p):
977 if not islink and not os.path.exists(p):
978 self.ui.warn(_("%s does not exist!\n") % f)
978 self.ui.warn(_("%s does not exist!\n") % f)
979 elif not islink and not os.path.isfile(p):
979 elif not islink and not os.path.isfile(p):
980 self.ui.warn(_("%s not added: only files and symlinks "
980 self.ui.warn(_("%s not added: only files and symlinks "
981 "supported currently\n") % f)
981 "supported currently\n") % f)
982 elif self.dirstate.state(f) in 'an':
982 elif self.dirstate.state(f) in 'an':
983 self.ui.warn(_("%s already tracked!\n") % f)
983 self.ui.warn(_("%s already tracked!\n") % f)
984 else:
984 else:
985 self.dirstate.update([f], "a")
985 self.dirstate.update([f], "a")
986
986
987 def forget(self, list, wlock=None):
987 def forget(self, list, wlock=None):
988 if not wlock:
988 if not wlock:
989 wlock = self.wlock()
989 wlock = self.wlock()
990 for f in list:
990 for f in list:
991 if self.dirstate.state(f) not in 'ai':
991 if self.dirstate.state(f) not in 'ai':
992 self.ui.warn(_("%s not added!\n") % f)
992 self.ui.warn(_("%s not added!\n") % f)
993 else:
993 else:
994 self.dirstate.forget([f])
994 self.dirstate.forget([f])
995
995
996 def remove(self, list, unlink=False, wlock=None):
996 def remove(self, list, unlink=False, wlock=None):
997 if unlink:
997 if unlink:
998 for f in list:
998 for f in list:
999 try:
999 try:
1000 util.unlink(self.wjoin(f))
1000 util.unlink(self.wjoin(f))
1001 except OSError, inst:
1001 except OSError, inst:
1002 if inst.errno != errno.ENOENT:
1002 if inst.errno != errno.ENOENT:
1003 raise
1003 raise
1004 if not wlock:
1004 if not wlock:
1005 wlock = self.wlock()
1005 wlock = self.wlock()
1006 for f in list:
1006 for f in list:
1007 p = self.wjoin(f)
1007 p = self.wjoin(f)
1008 if os.path.exists(p):
1008 if os.path.exists(p):
1009 self.ui.warn(_("%s still exists!\n") % f)
1009 self.ui.warn(_("%s still exists!\n") % f)
1010 elif self.dirstate.state(f) == 'a':
1010 elif self.dirstate.state(f) == 'a':
1011 self.dirstate.forget([f])
1011 self.dirstate.forget([f])
1012 elif f not in self.dirstate:
1012 elif f not in self.dirstate:
1013 self.ui.warn(_("%s not tracked!\n") % f)
1013 self.ui.warn(_("%s not tracked!\n") % f)
1014 else:
1014 else:
1015 self.dirstate.update([f], "r")
1015 self.dirstate.update([f], "r")
1016
1016
1017 def undelete(self, list, wlock=None):
1017 def undelete(self, list, wlock=None):
1018 p = self.dirstate.parents()[0]
1018 p = self.dirstate.parents()[0]
1019 mn = self.changelog.read(p)[0]
1019 mn = self.changelog.read(p)[0]
1020 m = self.manifest.read(mn)
1020 m = self.manifest.read(mn)
1021 if not wlock:
1021 if not wlock:
1022 wlock = self.wlock()
1022 wlock = self.wlock()
1023 for f in list:
1023 for f in list:
1024 if self.dirstate.state(f) not in "r":
1024 if self.dirstate.state(f) not in "r":
1025 self.ui.warn("%s not removed!\n" % f)
1025 self.ui.warn("%s not removed!\n" % f)
1026 else:
1026 else:
1027 t = self.file(f).read(m[f])
1027 t = self.file(f).read(m[f])
1028 self.wwrite(f, t, m.flags(f))
1028 self.wwrite(f, t, m.flags(f))
1029 self.dirstate.update([f], "n")
1029 self.dirstate.update([f], "n")
1030
1030
1031 def copy(self, source, dest, wlock=None):
1031 def copy(self, source, dest, wlock=None):
1032 p = self.wjoin(dest)
1032 p = self.wjoin(dest)
1033 if not os.path.exists(p):
1033 if not os.path.exists(p):
1034 self.ui.warn(_("%s does not exist!\n") % dest)
1034 self.ui.warn(_("%s does not exist!\n") % dest)
1035 elif not os.path.isfile(p):
1035 elif not os.path.isfile(p):
1036 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1036 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1037 else:
1037 else:
1038 if not wlock:
1038 if not wlock:
1039 wlock = self.wlock()
1039 wlock = self.wlock()
1040 if self.dirstate.state(dest) == '?':
1040 if self.dirstate.state(dest) == '?':
1041 self.dirstate.update([dest], "a")
1041 self.dirstate.update([dest], "a")
1042 self.dirstate.copy(source, dest)
1042 self.dirstate.copy(source, dest)
1043
1043
1044 def heads(self, start=None):
1044 def heads(self, start=None):
1045 heads = self.changelog.heads(start)
1045 heads = self.changelog.heads(start)
1046 # sort the output in rev descending order
1046 # sort the output in rev descending order
1047 heads = [(-self.changelog.rev(h), h) for h in heads]
1047 heads = [(-self.changelog.rev(h), h) for h in heads]
1048 heads.sort()
1048 heads.sort()
1049 return [n for (r, n) in heads]
1049 return [n for (r, n) in heads]
1050
1050
1051 def branches(self, nodes):
1051 def branches(self, nodes):
1052 if not nodes:
1052 if not nodes:
1053 nodes = [self.changelog.tip()]
1053 nodes = [self.changelog.tip()]
1054 b = []
1054 b = []
1055 for n in nodes:
1055 for n in nodes:
1056 t = n
1056 t = n
1057 while 1:
1057 while 1:
1058 p = self.changelog.parents(n)
1058 p = self.changelog.parents(n)
1059 if p[1] != nullid or p[0] == nullid:
1059 if p[1] != nullid or p[0] == nullid:
1060 b.append((t, n, p[0], p[1]))
1060 b.append((t, n, p[0], p[1]))
1061 break
1061 break
1062 n = p[0]
1062 n = p[0]
1063 return b
1063 return b
1064
1064
1065 def between(self, pairs):
1065 def between(self, pairs):
1066 r = []
1066 r = []
1067
1067
1068 for top, bottom in pairs:
1068 for top, bottom in pairs:
1069 n, l, i = top, [], 0
1069 n, l, i = top, [], 0
1070 f = 1
1070 f = 1
1071
1071
1072 while n != bottom:
1072 while n != bottom:
1073 p = self.changelog.parents(n)[0]
1073 p = self.changelog.parents(n)[0]
1074 if i == f:
1074 if i == f:
1075 l.append(n)
1075 l.append(n)
1076 f = f * 2
1076 f = f * 2
1077 n = p
1077 n = p
1078 i += 1
1078 i += 1
1079
1079
1080 r.append(l)
1080 r.append(l)
1081
1081
1082 return r
1082 return r
1083
1083
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # node -> rev map of everything we have locally
        m = self.changelog.nodemap
        search = []      # (head, root) branch ranges queued for binary search
        fetch = {}       # used as a set: earliest-unknown nodes found so far
        seen = {}        # used as a set: branch heads already examined
        seenbranch = {}  # used as a set: whole branch tuples already examined
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything remote has is missing here
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        # split remote heads into those we already have (known, go into
        # base) and those we do not (unknown, to be investigated)
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)  # nodes already requested from remote
        reqcnt = 0                    # round-trip counter, for debug output

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # branch root is unknown but both its parents
                            # are known: the root bounds what to fetch
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue any still-unknown parents for the next
                    # batched remote.branches request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                # ask about parents in batches of 10 to bound request size
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            # remote.between returns an exponentially spaced sample of
            # the first-parent chain from n[0] down to n[1]
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap between p and i is at most one node, so p
                        # is the earliest unknown on this branch
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # narrow to the (p, i) sub-range and re-search
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                        break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # no common ancestor other than null: unrelated repositories
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1224
1224
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node info supplied: compute it via discovery
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start from every local node, then prune what remote has
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                # parents of a common node are common too: keep pruning
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                # both parents pruned (known remotely): n is a root of
                # the outgoing set
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1272
1272
1273 def pull(self, remote, heads=None, force=False, lock=None):
1273 def pull(self, remote, heads=None, force=False, lock=None):
1274 mylock = False
1274 mylock = False
1275 if not lock:
1275 if not lock:
1276 lock = self.lock()
1276 lock = self.lock()
1277 mylock = True
1277 mylock = True
1278
1278
1279 try:
1279 try:
1280 fetch = self.findincoming(remote, force=force)
1280 fetch = self.findincoming(remote, force=force)
1281 if fetch == [nullid]:
1281 if fetch == [nullid]:
1282 self.ui.status(_("requesting all changes\n"))
1282 self.ui.status(_("requesting all changes\n"))
1283
1283
1284 if not fetch:
1284 if not fetch:
1285 self.ui.status(_("no changes found\n"))
1285 self.ui.status(_("no changes found\n"))
1286 return 0
1286 return 0
1287
1287
1288 if heads is None:
1288 if heads is None:
1289 cg = remote.changegroup(fetch, 'pull')
1289 cg = remote.changegroup(fetch, 'pull')
1290 else:
1290 else:
1291 if 'changegroupsubset' not in remote.capabilities:
1291 if 'changegroupsubset' not in remote.capabilities:
1292 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1292 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1293 cg = remote.changegroupsubset(fetch, heads, 'pull')
1293 cg = remote.changegroupsubset(fetch, heads, 'pull')
1294 return self.addchangegroup(cg, 'pull', remote.url())
1294 return self.addchangegroup(cg, 'pull', remote.url())
1295 finally:
1295 finally:
1296 if mylock:
1296 if mylock:
1297 lock.release()
1297 lock.release()
1298
1298
1299 def push(self, remote, force=False, revs=None):
1299 def push(self, remote, force=False, revs=None):
1300 # there are two ways to push to remote repo:
1300 # there are two ways to push to remote repo:
1301 #
1301 #
1302 # addchangegroup assumes local user can lock remote
1302 # addchangegroup assumes local user can lock remote
1303 # repo (local filesystem, old ssh servers).
1303 # repo (local filesystem, old ssh servers).
1304 #
1304 #
1305 # unbundle assumes local user cannot lock remote repo (new ssh
1305 # unbundle assumes local user cannot lock remote repo (new ssh
1306 # servers, http servers).
1306 # servers, http servers).
1307
1307
1308 if remote.capable('unbundle'):
1308 if remote.capable('unbundle'):
1309 return self.push_unbundle(remote, force, revs)
1309 return self.push_unbundle(remote, force, revs)
1310 return self.push_addchangegroup(remote, force, revs)
1310 return self.push_addchangegroup(remote, force, revs)
1311
1311
    def prepush(self, remote, force, revs):
        """Analyse a pending push and build its changegroup.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, 1) when nothing is outgoing or the push would
        create new remote heads without force.
        """
        base = {}
        remote_heads = remote.heads()
        # inc is true when remote has changes we do not (used only to
        # warn below); base is filled with the common nodes
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            # ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # pushing into an empty repo can never add a head
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # full push with more local heads than remote heads
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head known locally: it stays a head
                        # only if no outgoing head descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1367
1367
1368 def push_addchangegroup(self, remote, force, revs):
1368 def push_addchangegroup(self, remote, force, revs):
1369 lock = remote.lock()
1369 lock = remote.lock()
1370
1370
1371 ret = self.prepush(remote, force, revs)
1371 ret = self.prepush(remote, force, revs)
1372 if ret[0] is not None:
1372 if ret[0] is not None:
1373 cg, remote_heads = ret
1373 cg, remote_heads = ret
1374 return remote.addchangegroup(cg, 'push', self.url())
1374 return remote.addchangegroup(cg, 'push', self.url())
1375 return ret[1]
1375 return ret[1]
1376
1376
1377 def push_unbundle(self, remote, force, revs):
1377 def push_unbundle(self, remote, force, revs):
1378 # local repo finds heads on server, finds out what revs it
1378 # local repo finds heads on server, finds out what revs it
1379 # must push. once revs transferred, if server finds it has
1379 # must push. once revs transferred, if server finds it has
1380 # different heads (someone else won commit/push race), server
1380 # different heads (someone else won commit/push race), server
1381 # aborts.
1381 # aborts.
1382
1382
1383 ret = self.prepush(remote, force, revs)
1383 ret = self.prepush(remote, force, revs)
1384 if ret[0] is not None:
1384 if ret[0] is not None:
1385 cg, remote_heads = ret
1385 cg, remote_heads = ret
1386 if force: remote_heads = ['force']
1386 if force: remote_heads = ['force']
1387 return remote.unbundle(cg, remote_heads, 'push')
1387 return remote.unbundle(cg, remote_heads, 'push')
1388 return ret[1]
1388 return ret[1]
1389
1389
1390 def changegroupinfo(self, nodes):
1390 def changegroupinfo(self, nodes):
1391 self.ui.note(_("%d changesets found\n") % len(nodes))
1391 self.ui.note(_("%d changesets found\n") % len(nodes))
1392 if self.ui.debugflag:
1392 if self.ui.debugflag:
1393 self.ui.debug(_("List of changesets:\n"))
1393 self.ui.debug(_("List of changesets:\n"))
1394 for node in nodes:
1394 for node in nodes:
1395 self.ui.debug("%s\n" % hex(node))
1395 self.ui.debug("%s\n" % hex(node))
1396
1396
1397 def changegroupsubset(self, bases, heads, source):
1397 def changegroupsubset(self, bases, heads, source):
1398 """This function generates a changegroup consisting of all the nodes
1398 """This function generates a changegroup consisting of all the nodes
1399 that are descendents of any of the bases, and ancestors of any of
1399 that are descendents of any of the bases, and ancestors of any of
1400 the heads.
1400 the heads.
1401
1401
1402 It is fairly complex as determining which filenodes and which
1402 It is fairly complex as determining which filenodes and which
1403 manifest nodes need to be included for the changeset to be complete
1403 manifest nodes need to be included for the changeset to be complete
1404 is non-trivial.
1404 is non-trivial.
1405
1405
1406 Another wrinkle is doing the reverse, figuring out which changeset in
1406 Another wrinkle is doing the reverse, figuring out which changeset in
1407 the changegroup a particular filenode or manifestnode belongs to."""
1407 the changegroup a particular filenode or manifestnode belongs to."""
1408
1408
1409 self.hook('preoutgoing', throw=True, source=source)
1409 self.hook('preoutgoing', throw=True, source=source)
1410
1410
1411 # Set up some initial variables
1411 # Set up some initial variables
1412 # Make it easy to refer to self.changelog
1412 # Make it easy to refer to self.changelog
1413 cl = self.changelog
1413 cl = self.changelog
1414 # msng is short for missing - compute the list of changesets in this
1414 # msng is short for missing - compute the list of changesets in this
1415 # changegroup.
1415 # changegroup.
1416 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1416 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1417 self.changegroupinfo(msng_cl_lst)
1417 self.changegroupinfo(msng_cl_lst)
1418 # Some bases may turn out to be superfluous, and some heads may be
1418 # Some bases may turn out to be superfluous, and some heads may be
1419 # too. nodesbetween will return the minimal set of bases and heads
1419 # too. nodesbetween will return the minimal set of bases and heads
1420 # necessary to re-create the changegroup.
1420 # necessary to re-create the changegroup.
1421
1421
1422 # Known heads are the list of heads that it is assumed the recipient
1422 # Known heads are the list of heads that it is assumed the recipient
1423 # of this changegroup will know about.
1423 # of this changegroup will know about.
1424 knownheads = {}
1424 knownheads = {}
1425 # We assume that all parents of bases are known heads.
1425 # We assume that all parents of bases are known heads.
1426 for n in bases:
1426 for n in bases:
1427 for p in cl.parents(n):
1427 for p in cl.parents(n):
1428 if p != nullid:
1428 if p != nullid:
1429 knownheads[p] = 1
1429 knownheads[p] = 1
1430 knownheads = knownheads.keys()
1430 knownheads = knownheads.keys()
1431 if knownheads:
1431 if knownheads:
1432 # Now that we know what heads are known, we can compute which
1432 # Now that we know what heads are known, we can compute which
1433 # changesets are known. The recipient must know about all
1433 # changesets are known. The recipient must know about all
1434 # changesets required to reach the known heads from the null
1434 # changesets required to reach the known heads from the null
1435 # changeset.
1435 # changeset.
1436 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1436 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1437 junk = None
1437 junk = None
1438 # Transform the list into an ersatz set.
1438 # Transform the list into an ersatz set.
1439 has_cl_set = dict.fromkeys(has_cl_set)
1439 has_cl_set = dict.fromkeys(has_cl_set)
1440 else:
1440 else:
1441 # If there were no known heads, the recipient cannot be assumed to
1441 # If there were no known heads, the recipient cannot be assumed to
1442 # know about any changesets.
1442 # know about any changesets.
1443 has_cl_set = {}
1443 has_cl_set = {}
1444
1444
1445 # Make it easy to refer to self.manifest
1445 # Make it easy to refer to self.manifest
1446 mnfst = self.manifest
1446 mnfst = self.manifest
1447 # We don't know which manifests are missing yet
1447 # We don't know which manifests are missing yet
1448 msng_mnfst_set = {}
1448 msng_mnfst_set = {}
1449 # Nor do we know which filenodes are missing.
1449 # Nor do we know which filenodes are missing.
1450 msng_filenode_set = {}
1450 msng_filenode_set = {}
1451
1451
1452 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1452 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1453 junk = None
1453 junk = None
1454
1454
1455 # A changeset always belongs to itself, so the changenode lookup
1455 # A changeset always belongs to itself, so the changenode lookup
1456 # function for a changenode is identity.
1456 # function for a changenode is identity.
1457 def identity(x):
1457 def identity(x):
1458 return x
1458 return x
1459
1459
1460 # A function generating function. Sets up an environment for the
1460 # A function generating function. Sets up an environment for the
1461 # inner function.
1461 # inner function.
1462 def cmp_by_rev_func(revlog):
1462 def cmp_by_rev_func(revlog):
1463 # Compare two nodes by their revision number in the environment's
1463 # Compare two nodes by their revision number in the environment's
1464 # revision history. Since the revision number both represents the
1464 # revision history. Since the revision number both represents the
1465 # most efficient order to read the nodes in, and represents a
1465 # most efficient order to read the nodes in, and represents a
1466 # topological sorting of the nodes, this function is often useful.
1466 # topological sorting of the nodes, this function is often useful.
1467 def cmp_by_rev(a, b):
1467 def cmp_by_rev(a, b):
1468 return cmp(revlog.rev(a), revlog.rev(b))
1468 return cmp(revlog.rev(a), revlog.rev(b))
1469 return cmp_by_rev
1469 return cmp_by_rev
1470
1470
1471 # If we determine that a particular file or manifest node must be a
1471 # If we determine that a particular file or manifest node must be a
1472 # node that the recipient of the changegroup will already have, we can
1472 # node that the recipient of the changegroup will already have, we can
1473 # also assume the recipient will have all the parents. This function
1473 # also assume the recipient will have all the parents. This function
1474 # prunes them from the set of missing nodes.
1474 # prunes them from the set of missing nodes.
1475 def prune_parents(revlog, hasset, msngset):
1475 def prune_parents(revlog, hasset, msngset):
1476 haslst = hasset.keys()
1476 haslst = hasset.keys()
1477 haslst.sort(cmp_by_rev_func(revlog))
1477 haslst.sort(cmp_by_rev_func(revlog))
1478 for node in haslst:
1478 for node in haslst:
1479 parentlst = [p for p in revlog.parents(node) if p != nullid]
1479 parentlst = [p for p in revlog.parents(node) if p != nullid]
1480 while parentlst:
1480 while parentlst:
1481 n = parentlst.pop()
1481 n = parentlst.pop()
1482 if n not in hasset:
1482 if n not in hasset:
1483 hasset[n] = 1
1483 hasset[n] = 1
1484 p = [p for p in revlog.parents(n) if p != nullid]
1484 p = [p for p in revlog.parents(n) if p != nullid]
1485 parentlst.extend(p)
1485 parentlst.extend(p)
1486 for n in hasset:
1486 for n in hasset:
1487 msngset.pop(n, None)
1487 msngset.pop(n, None)
1488
1488
1489 # This is a function generating function used to set up an environment
1489 # This is a function generating function used to set up an environment
1490 # for the inner function to execute in.
1490 # for the inner function to execute in.
1491 def manifest_and_file_collector(changedfileset):
1491 def manifest_and_file_collector(changedfileset):
1492 # This is an information gathering function that gathers
1492 # This is an information gathering function that gathers
1493 # information from each changeset node that goes out as part of
1493 # information from each changeset node that goes out as part of
1494 # the changegroup. The information gathered is a list of which
1494 # the changegroup. The information gathered is a list of which
1495 # manifest nodes are potentially required (the recipient may
1495 # manifest nodes are potentially required (the recipient may
1496 # already have them) and total list of all files which were
1496 # already have them) and total list of all files which were
1497 # changed in any changeset in the changegroup.
1497 # changed in any changeset in the changegroup.
1498 #
1498 #
1499 # We also remember the first changenode we saw any manifest
1499 # We also remember the first changenode we saw any manifest
1500 # referenced by so we can later determine which changenode 'owns'
1500 # referenced by so we can later determine which changenode 'owns'
1501 # the manifest.
1501 # the manifest.
1502 def collect_manifests_and_files(clnode):
1502 def collect_manifests_and_files(clnode):
1503 c = cl.read(clnode)
1503 c = cl.read(clnode)
1504 for f in c[3]:
1504 for f in c[3]:
1505 # This is to make sure we only have one instance of each
1505 # This is to make sure we only have one instance of each
1506 # filename string for each filename.
1506 # filename string for each filename.
1507 changedfileset.setdefault(f, f)
1507 changedfileset.setdefault(f, f)
1508 msng_mnfst_set.setdefault(c[0], clnode)
1508 msng_mnfst_set.setdefault(c[0], clnode)
1509 return collect_manifests_and_files
1509 return collect_manifests_and_files
1510
1510
1511 # Figure out which manifest nodes (of the ones we think might be part
1511 # Figure out which manifest nodes (of the ones we think might be part
1512 # of the changegroup) the recipient must know about and remove them
1512 # of the changegroup) the recipient must know about and remove them
1513 # from the changegroup.
1513 # from the changegroup.
1514 def prune_manifests():
1514 def prune_manifests():
1515 has_mnfst_set = {}
1515 has_mnfst_set = {}
1516 for n in msng_mnfst_set:
1516 for n in msng_mnfst_set:
1517 # If a 'missing' manifest thinks it belongs to a changenode
1517 # If a 'missing' manifest thinks it belongs to a changenode
1518 # the recipient is assumed to have, obviously the recipient
1518 # the recipient is assumed to have, obviously the recipient
1519 # must have that manifest.
1519 # must have that manifest.
1520 linknode = cl.node(mnfst.linkrev(n))
1520 linknode = cl.node(mnfst.linkrev(n))
1521 if linknode in has_cl_set:
1521 if linknode in has_cl_set:
1522 has_mnfst_set[n] = 1
1522 has_mnfst_set[n] = 1
1523 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1523 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1524
1524
1525 # Use the information collected in collect_manifests_and_files to say
1525 # Use the information collected in collect_manifests_and_files to say
1526 # which changenode any manifestnode belongs to.
1526 # which changenode any manifestnode belongs to.
1527 def lookup_manifest_link(mnfstnode):
1527 def lookup_manifest_link(mnfstnode):
1528 return msng_mnfst_set[mnfstnode]
1528 return msng_mnfst_set[mnfstnode]
1529
1529
1530 # A function generating function that sets up the initial environment
1530 # A function generating function that sets up the initial environment
1531 # the inner function.
1531 # the inner function.
1532 def filenode_collector(changedfiles):
1532 def filenode_collector(changedfiles):
1533 next_rev = [0]
1533 next_rev = [0]
1534 # This gathers information from each manifestnode included in the
1534 # This gathers information from each manifestnode included in the
1535 # changegroup about which filenodes the manifest node references
1535 # changegroup about which filenodes the manifest node references
1536 # so we can include those in the changegroup too.
1536 # so we can include those in the changegroup too.
1537 #
1537 #
1538 # It also remembers which changenode each filenode belongs to. It
1538 # It also remembers which changenode each filenode belongs to. It
1539 # does this by assuming the a filenode belongs to the changenode
1539 # does this by assuming the a filenode belongs to the changenode
1540 # the first manifest that references it belongs to.
1540 # the first manifest that references it belongs to.
1541 def collect_msng_filenodes(mnfstnode):
1541 def collect_msng_filenodes(mnfstnode):
1542 r = mnfst.rev(mnfstnode)
1542 r = mnfst.rev(mnfstnode)
1543 if r == next_rev[0]:
1543 if r == next_rev[0]:
1544 # If the last rev we looked at was the one just previous,
1544 # If the last rev we looked at was the one just previous,
1545 # we only need to see a diff.
1545 # we only need to see a diff.
1546 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1546 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1547 # For each line in the delta
1547 # For each line in the delta
1548 for dline in delta.splitlines():
1548 for dline in delta.splitlines():
1549 # get the filename and filenode for that line
1549 # get the filename and filenode for that line
1550 f, fnode = dline.split('\0')
1550 f, fnode = dline.split('\0')
1551 fnode = bin(fnode[:40])
1551 fnode = bin(fnode[:40])
1552 f = changedfiles.get(f, None)
1552 f = changedfiles.get(f, None)
1553 # And if the file is in the list of files we care
1553 # And if the file is in the list of files we care
1554 # about.
1554 # about.
1555 if f is not None:
1555 if f is not None:
1556 # Get the changenode this manifest belongs to
1556 # Get the changenode this manifest belongs to
1557 clnode = msng_mnfst_set[mnfstnode]
1557 clnode = msng_mnfst_set[mnfstnode]
1558 # Create the set of filenodes for the file if
1558 # Create the set of filenodes for the file if
1559 # there isn't one already.
1559 # there isn't one already.
1560 ndset = msng_filenode_set.setdefault(f, {})
1560 ndset = msng_filenode_set.setdefault(f, {})
1561 # And set the filenode's changelog node to the
1561 # And set the filenode's changelog node to the
1562 # manifest's if it hasn't been set already.
1562 # manifest's if it hasn't been set already.
1563 ndset.setdefault(fnode, clnode)
1563 ndset.setdefault(fnode, clnode)
1564 else:
1564 else:
1565 # Otherwise we need a full manifest.
1565 # Otherwise we need a full manifest.
1566 m = mnfst.read(mnfstnode)
1566 m = mnfst.read(mnfstnode)
1567 # For every file in we care about.
1567 # For every file in we care about.
1568 for f in changedfiles:
1568 for f in changedfiles:
1569 fnode = m.get(f, None)
1569 fnode = m.get(f, None)
1570 # If it's in the manifest
1570 # If it's in the manifest
1571 if fnode is not None:
1571 if fnode is not None:
1572 # See comments above.
1572 # See comments above.
1573 clnode = msng_mnfst_set[mnfstnode]
1573 clnode = msng_mnfst_set[mnfstnode]
1574 ndset = msng_filenode_set.setdefault(f, {})
1574 ndset = msng_filenode_set.setdefault(f, {})
1575 ndset.setdefault(fnode, clnode)
1575 ndset.setdefault(fnode, clnode)
1576 # Remember the revision we hope to see next.
1576 # Remember the revision we hope to see next.
1577 next_rev[0] = r + 1
1577 next_rev[0] = r + 1
1578 return collect_msng_filenodes
1578 return collect_msng_filenodes
1579
1579
1580 # We have a list of filenodes we think we need for a file, lets remove
1580 # We have a list of filenodes we think we need for a file, lets remove
1581 # all those we now the recipient must have.
1581 # all those we now the recipient must have.
1582 def prune_filenodes(f, filerevlog):
1582 def prune_filenodes(f, filerevlog):
1583 msngset = msng_filenode_set[f]
1583 msngset = msng_filenode_set[f]
1584 hasset = {}
1584 hasset = {}
1585 # If a 'missing' filenode thinks it belongs to a changenode we
1585 # If a 'missing' filenode thinks it belongs to a changenode we
1586 # assume the recipient must have, then the recipient must have
1586 # assume the recipient must have, then the recipient must have
1587 # that filenode.
1587 # that filenode.
1588 for n in msngset:
1588 for n in msngset:
1589 clnode = cl.node(filerevlog.linkrev(n))
1589 clnode = cl.node(filerevlog.linkrev(n))
1590 if clnode in has_cl_set:
1590 if clnode in has_cl_set:
1591 hasset[n] = 1
1591 hasset[n] = 1
1592 prune_parents(filerevlog, hasset, msngset)
1592 prune_parents(filerevlog, hasset, msngset)
1593
1593
1594 # A function generator function that sets up the a context for the
1594 # A function generator function that sets up the a context for the
1595 # inner function.
1595 # inner function.
1596 def lookup_filenode_link_func(fname):
1596 def lookup_filenode_link_func(fname):
1597 msngset = msng_filenode_set[fname]
1597 msngset = msng_filenode_set[fname]
1598 # Lookup the changenode the filenode belongs to.
1598 # Lookup the changenode the filenode belongs to.
1599 def lookup_filenode_link(fnode):
1599 def lookup_filenode_link(fnode):
1600 return msngset[fnode]
1600 return msngset[fnode]
1601 return lookup_filenode_link
1601 return lookup_filenode_link
1602
1602
1603 # Now that we have all theses utility functions to help out and
1603 # Now that we have all theses utility functions to help out and
1604 # logically divide up the task, generate the group.
1604 # logically divide up the task, generate the group.
1605 def gengroup():
1605 def gengroup():
1606 # The set of changed files starts empty.
1606 # The set of changed files starts empty.
1607 changedfiles = {}
1607 changedfiles = {}
1608 # Create a changenode group generator that will call our functions
1608 # Create a changenode group generator that will call our functions
1609 # back to lookup the owning changenode and collect information.
1609 # back to lookup the owning changenode and collect information.
1610 group = cl.group(msng_cl_lst, identity,
1610 group = cl.group(msng_cl_lst, identity,
1611 manifest_and_file_collector(changedfiles))
1611 manifest_and_file_collector(changedfiles))
1612 for chnk in group:
1612 for chnk in group:
1613 yield chnk
1613 yield chnk
1614
1614
1615 # The list of manifests has been collected by the generator
1615 # The list of manifests has been collected by the generator
1616 # calling our functions back.
1616 # calling our functions back.
1617 prune_manifests()
1617 prune_manifests()
1618 msng_mnfst_lst = msng_mnfst_set.keys()
1618 msng_mnfst_lst = msng_mnfst_set.keys()
1619 # Sort the manifestnodes by revision number.
1619 # Sort the manifestnodes by revision number.
1620 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1620 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1621 # Create a generator for the manifestnodes that calls our lookup
1621 # Create a generator for the manifestnodes that calls our lookup
1622 # and data collection functions back.
1622 # and data collection functions back.
1623 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1623 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1624 filenode_collector(changedfiles))
1624 filenode_collector(changedfiles))
1625 for chnk in group:
1625 for chnk in group:
1626 yield chnk
1626 yield chnk
1627
1627
1628 # These are no longer needed, dereference and toss the memory for
1628 # These are no longer needed, dereference and toss the memory for
1629 # them.
1629 # them.
1630 msng_mnfst_lst = None
1630 msng_mnfst_lst = None
1631 msng_mnfst_set.clear()
1631 msng_mnfst_set.clear()
1632
1632
1633 changedfiles = changedfiles.keys()
1633 changedfiles = changedfiles.keys()
1634 changedfiles.sort()
1634 changedfiles.sort()
1635 # Go through all our files in order sorted by name.
1635 # Go through all our files in order sorted by name.
1636 for fname in changedfiles:
1636 for fname in changedfiles:
1637 filerevlog = self.file(fname)
1637 filerevlog = self.file(fname)
1638 # Toss out the filenodes that the recipient isn't really
1638 # Toss out the filenodes that the recipient isn't really
1639 # missing.
1639 # missing.
1640 if msng_filenode_set.has_key(fname):
1640 if msng_filenode_set.has_key(fname):
1641 prune_filenodes(fname, filerevlog)
1641 prune_filenodes(fname, filerevlog)
1642 msng_filenode_lst = msng_filenode_set[fname].keys()
1642 msng_filenode_lst = msng_filenode_set[fname].keys()
1643 else:
1643 else:
1644 msng_filenode_lst = []
1644 msng_filenode_lst = []
1645 # If any filenodes are left, generate the group for them,
1645 # If any filenodes are left, generate the group for them,
1646 # otherwise don't bother.
1646 # otherwise don't bother.
1647 if len(msng_filenode_lst) > 0:
1647 if len(msng_filenode_lst) > 0:
1648 yield changegroup.genchunk(fname)
1648 yield changegroup.genchunk(fname)
1649 # Sort the filenodes by their revision #
1649 # Sort the filenodes by their revision #
1650 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1650 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1651 # Create a group generator and only pass in a changenode
1651 # Create a group generator and only pass in a changenode
1652 # lookup function as we need to collect no information
1652 # lookup function as we need to collect no information
1653 # from filenodes.
1653 # from filenodes.
1654 group = filerevlog.group(msng_filenode_lst,
1654 group = filerevlog.group(msng_filenode_lst,
1655 lookup_filenode_link_func(fname))
1655 lookup_filenode_link_func(fname))
1656 for chnk in group:
1656 for chnk in group:
1657 yield chnk
1657 yield chnk
1658 if msng_filenode_set.has_key(fname):
1658 if msng_filenode_set.has_key(fname):
1659 # Don't need this anymore, toss it to free memory.
1659 # Don't need this anymore, toss it to free memory.
1660 del msng_filenode_set[fname]
1660 del msng_filenode_set[fname]
1661 # Signal that no more groups are left.
1661 # Signal that no more groups are left.
1662 yield changegroup.closechunk()
1662 yield changegroup.closechunk()
1663
1663
1664 if msng_cl_lst:
1664 if msng_cl_lst:
1665 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1665 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1666
1666
1667 return util.chunkbuffer(gengroup())
1667 return util.chunkbuffer(gengroup())
1668
1668
1669 def changegroup(self, basenodes, source):
1669 def changegroup(self, basenodes, source):
1670 """Generate a changegroup of all nodes that we have that a recipient
1670 """Generate a changegroup of all nodes that we have that a recipient
1671 doesn't.
1671 doesn't.
1672
1672
1673 This is much easier than the previous function as we can assume that
1673 This is much easier than the previous function as we can assume that
1674 the recipient has any changenode we aren't sending them."""
1674 the recipient has any changenode we aren't sending them."""
1675
1675
1676 self.hook('preoutgoing', throw=True, source=source)
1676 self.hook('preoutgoing', throw=True, source=source)
1677
1677
1678 cl = self.changelog
1678 cl = self.changelog
1679 nodes = cl.nodesbetween(basenodes, None)[0]
1679 nodes = cl.nodesbetween(basenodes, None)[0]
1680 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1680 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1681 self.changegroupinfo(nodes)
1681 self.changegroupinfo(nodes)
1682
1682
1683 def identity(x):
1683 def identity(x):
1684 return x
1684 return x
1685
1685
1686 def gennodelst(revlog):
1686 def gennodelst(revlog):
1687 for r in xrange(0, revlog.count()):
1687 for r in xrange(0, revlog.count()):
1688 n = revlog.node(r)
1688 n = revlog.node(r)
1689 if revlog.linkrev(n) in revset:
1689 if revlog.linkrev(n) in revset:
1690 yield n
1690 yield n
1691
1691
1692 def changed_file_collector(changedfileset):
1692 def changed_file_collector(changedfileset):
1693 def collect_changed_files(clnode):
1693 def collect_changed_files(clnode):
1694 c = cl.read(clnode)
1694 c = cl.read(clnode)
1695 for fname in c[3]:
1695 for fname in c[3]:
1696 changedfileset[fname] = 1
1696 changedfileset[fname] = 1
1697 return collect_changed_files
1697 return collect_changed_files
1698
1698
1699 def lookuprevlink_func(revlog):
1699 def lookuprevlink_func(revlog):
1700 def lookuprevlink(n):
1700 def lookuprevlink(n):
1701 return cl.node(revlog.linkrev(n))
1701 return cl.node(revlog.linkrev(n))
1702 return lookuprevlink
1702 return lookuprevlink
1703
1703
1704 def gengroup():
1704 def gengroup():
1705 # construct a list of all changed files
1705 # construct a list of all changed files
1706 changedfiles = {}
1706 changedfiles = {}
1707
1707
1708 for chnk in cl.group(nodes, identity,
1708 for chnk in cl.group(nodes, identity,
1709 changed_file_collector(changedfiles)):
1709 changed_file_collector(changedfiles)):
1710 yield chnk
1710 yield chnk
1711 changedfiles = changedfiles.keys()
1711 changedfiles = changedfiles.keys()
1712 changedfiles.sort()
1712 changedfiles.sort()
1713
1713
1714 mnfst = self.manifest
1714 mnfst = self.manifest
1715 nodeiter = gennodelst(mnfst)
1715 nodeiter = gennodelst(mnfst)
1716 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1716 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1717 yield chnk
1717 yield chnk
1718
1718
1719 for fname in changedfiles:
1719 for fname in changedfiles:
1720 filerevlog = self.file(fname)
1720 filerevlog = self.file(fname)
1721 nodeiter = gennodelst(filerevlog)
1721 nodeiter = gennodelst(filerevlog)
1722 nodeiter = list(nodeiter)
1722 nodeiter = list(nodeiter)
1723 if nodeiter:
1723 if nodeiter:
1724 yield changegroup.genchunk(fname)
1724 yield changegroup.genchunk(fname)
1725 lookup = lookuprevlink_func(filerevlog)
1725 lookup = lookuprevlink_func(filerevlog)
1726 for chnk in filerevlog.group(nodeiter, lookup):
1726 for chnk in filerevlog.group(nodeiter, lookup):
1727 yield chnk
1727 yield chnk
1728
1728
1729 yield changegroup.closechunk()
1729 yield changegroup.closechunk()
1730
1730
1731 if nodes:
1731 if nodes:
1732 self.hook('outgoing', node=hex(nodes[0]), source=source)
1732 self.hook('outgoing', node=hex(nodes[0]), source=source)
1733
1733
1734 return util.chunkbuffer(gengroup())
1734 return util.chunkbuffer(gengroup())
1735
1735
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # Lookup callbacks handed to revlog.addgroup below.  Both close
        # over 'cl', which is only bound later (the appendfile changelog).
        def csmap(x):
            # A changeset's link revision is its own, about-to-be-added rev.
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # Manifests/filelogs link to the changeset rev they belong to.
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog rev counts before/after the group is added.
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files: a stream of (filename chunk, filelog group)
            # pairs, terminated by an empty chunk.
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # Flush the appendfile data into the real changelog files.
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # cor+1 is the first newly added changeset revision.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1842
1842
1843
1843
1844 def stream_in(self, remote):
1844 def stream_in(self, remote):
1845 fp = remote.stream_out()
1845 fp = remote.stream_out()
1846 l = fp.readline()
1846 l = fp.readline()
1847 try:
1847 try:
1848 resp = int(l)
1848 resp = int(l)
1849 except ValueError:
1849 except ValueError:
1850 raise util.UnexpectedOutput(
1850 raise util.UnexpectedOutput(
1851 _('Unexpected response from remote server:'), l)
1851 _('Unexpected response from remote server:'), l)
1852 if resp == 1:
1852 if resp == 1:
1853 raise util.Abort(_('operation forbidden by server'))
1853 raise util.Abort(_('operation forbidden by server'))
1854 elif resp == 2:
1854 elif resp == 2:
1855 raise util.Abort(_('locking the remote repository failed'))
1855 raise util.Abort(_('locking the remote repository failed'))
1856 elif resp != 0:
1856 elif resp != 0:
1857 raise util.Abort(_('the server sent an unknown error code'))
1857 raise util.Abort(_('the server sent an unknown error code'))
1858 self.ui.status(_('streaming all changes\n'))
1858 self.ui.status(_('streaming all changes\n'))
1859 l = fp.readline()
1859 l = fp.readline()
1860 try:
1860 try:
1861 total_files, total_bytes = map(int, l.split(' ', 1))
1861 total_files, total_bytes = map(int, l.split(' ', 1))
1862 except ValueError, TypeError:
1862 except ValueError, TypeError:
1863 raise util.UnexpectedOutput(
1863 raise util.UnexpectedOutput(
1864 _('Unexpected response from remote server:'), l)
1864 _('Unexpected response from remote server:'), l)
1865 self.ui.status(_('%d files to transfer, %s of data\n') %
1865 self.ui.status(_('%d files to transfer, %s of data\n') %
1866 (total_files, util.bytecount(total_bytes)))
1866 (total_files, util.bytecount(total_bytes)))
1867 start = time.time()
1867 start = time.time()
1868 for i in xrange(total_files):
1868 for i in xrange(total_files):
1869 # XXX doesn't support '\n' or '\r' in filenames
1869 # XXX doesn't support '\n' or '\r' in filenames
1870 l = fp.readline()
1870 l = fp.readline()
1871 try:
1871 try:
1872 name, size = l.split('\0', 1)
1872 name, size = l.split('\0', 1)
1873 size = int(size)
1873 size = int(size)
1874 except ValueError, TypeError:
1874 except ValueError, TypeError:
1875 raise util.UnexpectedOutput(
1875 raise util.UnexpectedOutput(
1876 _('Unexpected response from remote server:'), l)
1876 _('Unexpected response from remote server:'), l)
1877 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1877 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1878 ofp = self.sopener(name, 'w')
1878 ofp = self.sopener(name, 'w')
1879 for chunk in util.filechunkiter(fp, limit=size):
1879 for chunk in util.filechunkiter(fp, limit=size):
1880 ofp.write(chunk)
1880 ofp.write(chunk)
1881 ofp.close()
1881 ofp.close()
1882 elapsed = time.time() - start
1882 elapsed = time.time() - start
1883 if elapsed <= 0:
1884 elapsed = 0.001
1883 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1885 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1884 (util.bytecount(total_bytes), elapsed,
1886 (util.bytecount(total_bytes), elapsed,
1885 util.bytecount(total_bytes / elapsed)))
1887 util.bytecount(total_bytes / elapsed)))
1886 self.reload()
1888 self.reload()
1887 return len(self.heads()) + 1
1889 return len(self.heads()) + 1
1888
1890
1889 def clone(self, remote, heads=[], stream=False):
1891 def clone(self, remote, heads=[], stream=False):
1890 '''clone remote repository.
1892 '''clone remote repository.
1891
1893
1892 keyword arguments:
1894 keyword arguments:
1893 heads: list of revs to clone (forces use of pull)
1895 heads: list of revs to clone (forces use of pull)
1894 stream: use streaming clone if possible'''
1896 stream: use streaming clone if possible'''
1895
1897
1896 # now, all clients that can request uncompressed clones can
1898 # now, all clients that can request uncompressed clones can
1897 # read repo formats supported by all servers that can serve
1899 # read repo formats supported by all servers that can serve
1898 # them.
1900 # them.
1899
1901
1900 # if revlog format changes, client will have to check version
1902 # if revlog format changes, client will have to check version
1901 # and format flags on "stream" capability, and use
1903 # and format flags on "stream" capability, and use
1902 # uncompressed only if compatible.
1904 # uncompressed only if compatible.
1903
1905
1904 if stream and not heads and remote.capable('stream'):
1906 if stream and not heads and remote.capable('stream'):
1905 return self.stream_in(remote)
1907 return self.stream_in(remote)
1906 return self.pull(remote, heads)
1908 return self.pull(remote, heads)
1907
1909
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The file pairs are copied into plain tuples up front so the returned
    closure holds no reference back to the caller's structures.
    """
    pending = [tuple(entry) for entry in files]
    def commit_renames():
        for src, dest in pending:
            util.rename(src, dest)
    return commit_renames
1915
1917
def instance(ui, path, create):
    """Open (or create, when *create* is true) a localrepository at *path*.

    *path* may carry a 'file:' scheme prefix, which is stripped before the
    repository is opened.
    NOTE(review): presumably invoked through hg's repo-scheme dispatch
    alongside islocal() below -- confirm against the caller.
    """
    return localrepository(ui, util.drop_scheme('file', path), create)
1918
1920
def islocal(path):
    """Return True: every repository handled by this module is local."""
    return True
@@ -1,95 +1,95 b''
1 # streamclone.py - streaming clone server support for mercurial
1 # streamclone.py - streaming clone server support for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from i18n import _
8 from i18n import _
9 import os, stat, util, lock
9 import os, stat, util, lock
10
10
11 # if server supports streaming clone, it advertises "stream"
11 # if server supports streaming clone, it advertises "stream"
12 # capability with value that is version+flags of repo it is serving.
12 # capability with value that is version+flags of repo it is serving.
13 # client only streams if it can read that repo format.
13 # client only streams if it can read that repo format.
14
14
15 def walkrepo(root):
15 def walkrepo(root):
16 '''iterate over metadata files in repository.
16 '''iterate over metadata files in repository.
17 walk in natural (sorted) order.
17 walk in natural (sorted) order.
18 yields 2-tuples: name of .d or .i file, size of file.'''
18 yields 2-tuples: name of .d or .i file, size of file.'''
19
19
20 strip_count = len(root) + len(os.sep)
20 strip_count = len(root) + len(os.sep)
21 def walk(path, recurse):
21 def walk(path, recurse):
22 ents = os.listdir(path)
22 ents = os.listdir(path)
23 ents.sort()
23 ents.sort()
24 for e in ents:
24 for e in ents:
25 pe = os.path.join(path, e)
25 pe = os.path.join(path, e)
26 st = os.lstat(pe)
26 st = os.lstat(pe)
27 if stat.S_ISDIR(st.st_mode):
27 if stat.S_ISDIR(st.st_mode):
28 if recurse:
28 if recurse:
29 for x in walk(pe, True):
29 for x in walk(pe, True):
30 yield x
30 yield x
31 else:
31 else:
32 if not stat.S_ISREG(st.st_mode) or len(e) < 2:
32 if not stat.S_ISREG(st.st_mode) or len(e) < 2:
33 continue
33 continue
34 sfx = e[-2:]
34 sfx = e[-2:]
35 if sfx in ('.d', '.i'):
35 if sfx in ('.d', '.i'):
36 yield pe[strip_count:], st.st_size
36 yield pe[strip_count:], st.st_size
37 # write file data first
37 # write file data first
38 for x in walk(os.path.join(root, 'data'), True):
38 for x in walk(os.path.join(root, 'data'), True):
39 yield x
39 yield x
40 # write manifest before changelog
40 # write manifest before changelog
41 meta = list(walk(root, False))
41 meta = list(walk(root, False))
42 meta.sort()
42 meta.sort()
43 meta.reverse()
43 meta.reverse()
44 for x in meta:
44 for x in meta:
45 yield x
45 yield x
46
46
47 # stream file format is simple.
47 # stream file format is simple.
48 #
48 #
49 # server writes out line that says how many files, how many total
49 # server writes out line that says how many files, how many total
50 # bytes. separator is ascii space, byte counts are strings.
50 # bytes. separator is ascii space, byte counts are strings.
51 #
51 #
52 # then for each file:
52 # then for each file:
53 #
53 #
54 # server writes out line that says file name, how many bytes in
54 # server writes out line that says file name, how many bytes in
55 # file. separator is ascii nul, byte count is string.
55 # file. separator is ascii nul, byte count is string.
56 #
56 #
57 # server writes out raw file data.
57 # server writes out raw file data.
58
58
59 def stream_out(repo, fileobj):
59 def stream_out(repo, fileobj):
60 '''stream out all metadata files in repository.
60 '''stream out all metadata files in repository.
61 writes to file-like object, must support write() and optional flush().'''
61 writes to file-like object, must support write() and optional flush().'''
62
62
63 if not repo.ui.configbool('server', 'uncompressed'):
63 if not repo.ui.configbool('server', 'uncompressed'):
64 fileobj.write('1\n')
64 fileobj.write('1\n')
65 return
65 return
66
66
67 # get consistent snapshot of repo. lock during scan so lock not
67 # get consistent snapshot of repo. lock during scan so lock not
68 # needed while we stream, and commits can happen.
68 # needed while we stream, and commits can happen.
69 try:
69 try:
70 repolock = repo.lock()
70 repolock = repo.lock()
71 except (lock.LockHeld, lock.LockUnavailable), inst:
71 except (lock.LockHeld, lock.LockUnavailable), inst:
72 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
72 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
73 fileobj.write('2\n')
73 fileobj.write('2\n')
74 return
74 return
75
75
76 fileobj.write('0\n')
76 fileobj.write('0\n')
77 repo.ui.debug('scanning\n')
77 repo.ui.debug('scanning\n')
78 entries = []
78 entries = []
79 total_bytes = 0
79 total_bytes = 0
80 for name, size in walkrepo(repo.spath):
80 for name, size in walkrepo(repo.spath):
81 name = util.pconvert(repo.decodefn(name))
81 name = repo.decodefn(util.pconvert(name))
82 entries.append((name, size))
82 entries.append((name, size))
83 total_bytes += size
83 total_bytes += size
84 repolock.release()
84 repolock.release()
85
85
86 repo.ui.debug('%d files, %d bytes to transfer\n' %
86 repo.ui.debug('%d files, %d bytes to transfer\n' %
87 (len(entries), total_bytes))
87 (len(entries), total_bytes))
88 fileobj.write('%d %d\n' % (len(entries), total_bytes))
88 fileobj.write('%d %d\n' % (len(entries), total_bytes))
89 for name, size in entries:
89 for name, size in entries:
90 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
90 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
91 fileobj.write('%s\0%d\n' % (name, size))
91 fileobj.write('%s\0%d\n' % (name, size))
92 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
92 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
93 fileobj.write(chunk)
93 fileobj.write(chunk)
94 flush = getattr(fileobj, 'flush', None)
94 flush = getattr(fileobj, 'flush', None)
95 if flush: flush()
95 if flush: flush()
@@ -1,1440 +1,1448 b''
1 """
1 """
2 util.py - Mercurial utility functions and platform specfic implementations
2 util.py - Mercurial utility functions and platform specfic implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7
7
8 This software may be used and distributed according to the terms
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
9 of the GNU General Public License, incorporated herein by reference.
10
10
11 This contains helper routines that are independent of the SCM core and hide
11 This contains helper routines that are independent of the SCM core and hide
12 platform-specific details from the core.
12 platform-specific details from the core.
13 """
13 """
14
14
15 from i18n import _
15 from i18n import _
16 import cStringIO, errno, getpass, popen2, re, shutil, sys, tempfile
16 import cStringIO, errno, getpass, popen2, re, shutil, sys, tempfile
17 import os, threading, time, calendar, ConfigParser, locale, glob
17 import os, threading, time, calendar, ConfigParser, locale, glob
18
18
19 try:
19 try:
20 _encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding() \
20 _encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding() \
21 or "ascii"
21 or "ascii"
22 except locale.Error:
22 except locale.Error:
23 _encoding = 'ascii'
23 _encoding = 'ascii'
24 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
24 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
25 _fallbackencoding = 'ISO-8859-1'
25 _fallbackencoding = 'ISO-8859-1'
26
26
27 def tolocal(s):
27 def tolocal(s):
28 """
28 """
29 Convert a string from internal UTF-8 to local encoding
29 Convert a string from internal UTF-8 to local encoding
30
30
31 All internal strings should be UTF-8 but some repos before the
31 All internal strings should be UTF-8 but some repos before the
32 implementation of locale support may contain latin1 or possibly
32 implementation of locale support may contain latin1 or possibly
33 other character sets. We attempt to decode everything strictly
33 other character sets. We attempt to decode everything strictly
34 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
34 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
35 replace unknown characters.
35 replace unknown characters.
36 """
36 """
37 for e in ('UTF-8', _fallbackencoding):
37 for e in ('UTF-8', _fallbackencoding):
38 try:
38 try:
39 u = s.decode(e) # attempt strict decoding
39 u = s.decode(e) # attempt strict decoding
40 return u.encode(_encoding, "replace")
40 return u.encode(_encoding, "replace")
41 except LookupError, k:
41 except LookupError, k:
42 raise Abort(_("%s, please check your locale settings") % k)
42 raise Abort(_("%s, please check your locale settings") % k)
43 except UnicodeDecodeError:
43 except UnicodeDecodeError:
44 pass
44 pass
45 u = s.decode("utf-8", "replace") # last ditch
45 u = s.decode("utf-8", "replace") # last ditch
46 return u.encode(_encoding, "replace")
46 return u.encode(_encoding, "replace")
47
47
48 def fromlocal(s):
48 def fromlocal(s):
49 """
49 """
50 Convert a string from the local character encoding to UTF-8
50 Convert a string from the local character encoding to UTF-8
51
51
52 We attempt to decode strings using the encoding mode set by
52 We attempt to decode strings using the encoding mode set by
53 HG_ENCODINGMODE, which defaults to 'strict'. In this mode, unknown
53 HG_ENCODINGMODE, which defaults to 'strict'. In this mode, unknown
54 characters will cause an error message. Other modes include
54 characters will cause an error message. Other modes include
55 'replace', which replaces unknown characters with a special
55 'replace', which replaces unknown characters with a special
56 Unicode character, and 'ignore', which drops the character.
56 Unicode character, and 'ignore', which drops the character.
57 """
57 """
58 try:
58 try:
59 return s.decode(_encoding, _encodingmode).encode("utf-8")
59 return s.decode(_encoding, _encodingmode).encode("utf-8")
60 except UnicodeDecodeError, inst:
60 except UnicodeDecodeError, inst:
61 sub = s[max(0, inst.start-10):inst.start+10]
61 sub = s[max(0, inst.start-10):inst.start+10]
62 raise Abort("decoding near '%s': %s!" % (sub, inst))
62 raise Abort("decoding near '%s': %s!" % (sub, inst))
63 except LookupError, k:
63 except LookupError, k:
64 raise Abort(_("%s, please check your locale settings") % k)
64 raise Abort(_("%s, please check your locale settings") % k)
65
65
66 def locallen(s):
66 def locallen(s):
67 """Find the length in characters of a local string"""
67 """Find the length in characters of a local string"""
68 return len(s.decode(_encoding, "replace"))
68 return len(s.decode(_encoding, "replace"))
69
69
70 def localsub(s, a, b=None):
70 def localsub(s, a, b=None):
71 try:
71 try:
72 u = s.decode(_encoding, _encodingmode)
72 u = s.decode(_encoding, _encodingmode)
73 if b is not None:
73 if b is not None:
74 u = u[a:b]
74 u = u[a:b]
75 else:
75 else:
76 u = u[:a]
76 u = u[:a]
77 return u.encode(_encoding, _encodingmode)
77 return u.encode(_encoding, _encodingmode)
78 except UnicodeDecodeError, inst:
78 except UnicodeDecodeError, inst:
79 sub = s[max(0, inst.start-10), inst.start+10]
79 sub = s[max(0, inst.start-10), inst.start+10]
80 raise Abort(_("decoding near '%s': %s!\n") % (sub, inst))
80 raise Abort(_("decoding near '%s': %s!\n") % (sub, inst))
81
81
82 # used by parsedate
82 # used by parsedate
83 defaultdateformats = (
83 defaultdateformats = (
84 '%Y-%m-%d %H:%M:%S',
84 '%Y-%m-%d %H:%M:%S',
85 '%Y-%m-%d %I:%M:%S%p',
85 '%Y-%m-%d %I:%M:%S%p',
86 '%Y-%m-%d %H:%M',
86 '%Y-%m-%d %H:%M',
87 '%Y-%m-%d %I:%M%p',
87 '%Y-%m-%d %I:%M%p',
88 '%Y-%m-%d',
88 '%Y-%m-%d',
89 '%m-%d',
89 '%m-%d',
90 '%m/%d',
90 '%m/%d',
91 '%m/%d/%y',
91 '%m/%d/%y',
92 '%m/%d/%Y',
92 '%m/%d/%Y',
93 '%a %b %d %H:%M:%S %Y',
93 '%a %b %d %H:%M:%S %Y',
94 '%a %b %d %I:%M:%S%p %Y',
94 '%a %b %d %I:%M:%S%p %Y',
95 '%b %d %H:%M:%S %Y',
95 '%b %d %H:%M:%S %Y',
96 '%b %d %I:%M:%S%p %Y',
96 '%b %d %I:%M:%S%p %Y',
97 '%b %d %H:%M:%S',
97 '%b %d %H:%M:%S',
98 '%b %d %I:%M:%S%p',
98 '%b %d %I:%M:%S%p',
99 '%b %d %H:%M',
99 '%b %d %H:%M',
100 '%b %d %I:%M%p',
100 '%b %d %I:%M%p',
101 '%b %d %Y',
101 '%b %d %Y',
102 '%b %d',
102 '%b %d',
103 '%H:%M:%S',
103 '%H:%M:%S',
104 '%I:%M:%SP',
104 '%I:%M:%SP',
105 '%H:%M',
105 '%H:%M',
106 '%I:%M%p',
106 '%I:%M%p',
107 )
107 )
108
108
109 extendeddateformats = defaultdateformats + (
109 extendeddateformats = defaultdateformats + (
110 "%Y",
110 "%Y",
111 "%Y-%m",
111 "%Y-%m",
112 "%b",
112 "%b",
113 "%b %Y",
113 "%b %Y",
114 )
114 )
115
115
116 class SignalInterrupt(Exception):
116 class SignalInterrupt(Exception):
117 """Exception raised on SIGTERM and SIGHUP."""
117 """Exception raised on SIGTERM and SIGHUP."""
118
118
119 # differences from SafeConfigParser:
119 # differences from SafeConfigParser:
120 # - case-sensitive keys
120 # - case-sensitive keys
121 # - allows values that are not strings (this means that you may not
121 # - allows values that are not strings (this means that you may not
122 # be able to save the configuration to a file)
122 # be able to save the configuration to a file)
123 class configparser(ConfigParser.SafeConfigParser):
123 class configparser(ConfigParser.SafeConfigParser):
124 def optionxform(self, optionstr):
124 def optionxform(self, optionstr):
125 return optionstr
125 return optionstr
126
126
127 def set(self, section, option, value):
127 def set(self, section, option, value):
128 return ConfigParser.ConfigParser.set(self, section, option, value)
128 return ConfigParser.ConfigParser.set(self, section, option, value)
129
129
130 def _interpolate(self, section, option, rawval, vars):
130 def _interpolate(self, section, option, rawval, vars):
131 if not isinstance(rawval, basestring):
131 if not isinstance(rawval, basestring):
132 return rawval
132 return rawval
133 return ConfigParser.SafeConfigParser._interpolate(self, section,
133 return ConfigParser.SafeConfigParser._interpolate(self, section,
134 option, rawval, vars)
134 option, rawval, vars)
135
135
136 def cachefunc(func):
136 def cachefunc(func):
137 '''cache the result of function calls'''
137 '''cache the result of function calls'''
138 # XXX doesn't handle keywords args
138 # XXX doesn't handle keywords args
139 cache = {}
139 cache = {}
140 if func.func_code.co_argcount == 1:
140 if func.func_code.co_argcount == 1:
141 # we gain a small amount of time because
141 # we gain a small amount of time because
142 # we don't need to pack/unpack the list
142 # we don't need to pack/unpack the list
143 def f(arg):
143 def f(arg):
144 if arg not in cache:
144 if arg not in cache:
145 cache[arg] = func(arg)
145 cache[arg] = func(arg)
146 return cache[arg]
146 return cache[arg]
147 else:
147 else:
148 def f(*args):
148 def f(*args):
149 if args not in cache:
149 if args not in cache:
150 cache[args] = func(*args)
150 cache[args] = func(*args)
151 return cache[args]
151 return cache[args]
152
152
153 return f
153 return f
154
154
155 def pipefilter(s, cmd):
155 def pipefilter(s, cmd):
156 '''filter string S through command CMD, returning its output'''
156 '''filter string S through command CMD, returning its output'''
157 (pout, pin) = popen2.popen2(cmd, -1, 'b')
157 (pout, pin) = popen2.popen2(cmd, -1, 'b')
158 def writer():
158 def writer():
159 try:
159 try:
160 pin.write(s)
160 pin.write(s)
161 pin.close()
161 pin.close()
162 except IOError, inst:
162 except IOError, inst:
163 if inst.errno != errno.EPIPE:
163 if inst.errno != errno.EPIPE:
164 raise
164 raise
165
165
166 # we should use select instead on UNIX, but this will work on most
166 # we should use select instead on UNIX, but this will work on most
167 # systems, including Windows
167 # systems, including Windows
168 w = threading.Thread(target=writer)
168 w = threading.Thread(target=writer)
169 w.start()
169 w.start()
170 f = pout.read()
170 f = pout.read()
171 pout.close()
171 pout.close()
172 w.join()
172 w.join()
173 return f
173 return f
174
174
175 def tempfilter(s, cmd):
175 def tempfilter(s, cmd):
176 '''filter string S through a pair of temporary files with CMD.
176 '''filter string S through a pair of temporary files with CMD.
177 CMD is used as a template to create the real command to be run,
177 CMD is used as a template to create the real command to be run,
178 with the strings INFILE and OUTFILE replaced by the real names of
178 with the strings INFILE and OUTFILE replaced by the real names of
179 the temporary files generated.'''
179 the temporary files generated.'''
180 inname, outname = None, None
180 inname, outname = None, None
181 try:
181 try:
182 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
182 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
183 fp = os.fdopen(infd, 'wb')
183 fp = os.fdopen(infd, 'wb')
184 fp.write(s)
184 fp.write(s)
185 fp.close()
185 fp.close()
186 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
186 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
187 os.close(outfd)
187 os.close(outfd)
188 cmd = cmd.replace('INFILE', inname)
188 cmd = cmd.replace('INFILE', inname)
189 cmd = cmd.replace('OUTFILE', outname)
189 cmd = cmd.replace('OUTFILE', outname)
190 code = os.system(cmd)
190 code = os.system(cmd)
191 if code: raise Abort(_("command '%s' failed: %s") %
191 if code: raise Abort(_("command '%s' failed: %s") %
192 (cmd, explain_exit(code)))
192 (cmd, explain_exit(code)))
193 return open(outname, 'rb').read()
193 return open(outname, 'rb').read()
194 finally:
194 finally:
195 try:
195 try:
196 if inname: os.unlink(inname)
196 if inname: os.unlink(inname)
197 except: pass
197 except: pass
198 try:
198 try:
199 if outname: os.unlink(outname)
199 if outname: os.unlink(outname)
200 except: pass
200 except: pass
201
201
202 filtertable = {
202 filtertable = {
203 'tempfile:': tempfilter,
203 'tempfile:': tempfilter,
204 'pipe:': pipefilter,
204 'pipe:': pipefilter,
205 }
205 }
206
206
207 def filter(s, cmd):
207 def filter(s, cmd):
208 "filter a string through a command that transforms its input to its output"
208 "filter a string through a command that transforms its input to its output"
209 for name, fn in filtertable.iteritems():
209 for name, fn in filtertable.iteritems():
210 if cmd.startswith(name):
210 if cmd.startswith(name):
211 return fn(s, cmd[len(name):].lstrip())
211 return fn(s, cmd[len(name):].lstrip())
212 return pipefilter(s, cmd)
212 return pipefilter(s, cmd)
213
213
214 def find_in_path(name, path, default=None):
214 def find_in_path(name, path, default=None):
215 '''find name in search path. path can be string (will be split
215 '''find name in search path. path can be string (will be split
216 with os.pathsep), or iterable thing that returns strings. if name
216 with os.pathsep), or iterable thing that returns strings. if name
217 found, return path to name. else return default.'''
217 found, return path to name. else return default.'''
218 if isinstance(path, str):
218 if isinstance(path, str):
219 path = path.split(os.pathsep)
219 path = path.split(os.pathsep)
220 for p in path:
220 for p in path:
221 p_name = os.path.join(p, name)
221 p_name = os.path.join(p, name)
222 if os.path.exists(p_name):
222 if os.path.exists(p_name):
223 return p_name
223 return p_name
224 return default
224 return default
225
225
226 def binary(s):
226 def binary(s):
227 """return true if a string is binary data using diff's heuristic"""
227 """return true if a string is binary data using diff's heuristic"""
228 if s and '\0' in s[:4096]:
228 if s and '\0' in s[:4096]:
229 return True
229 return True
230 return False
230 return False
231
231
232 def unique(g):
232 def unique(g):
233 """return the uniq elements of iterable g"""
233 """return the uniq elements of iterable g"""
234 seen = {}
234 seen = {}
235 l = []
235 l = []
236 for f in g:
236 for f in g:
237 if f not in seen:
237 if f not in seen:
238 seen[f] = 1
238 seen[f] = 1
239 l.append(f)
239 l.append(f)
240 return l
240 return l
241
241
242 class Abort(Exception):
242 class Abort(Exception):
243 """Raised if a command needs to print an error and exit."""
243 """Raised if a command needs to print an error and exit."""
244
244
245 class UnexpectedOutput(Abort):
245 class UnexpectedOutput(Abort):
246 """Raised to print an error with part of output and exit."""
246 """Raised to print an error with part of output and exit."""
247
247
248 def always(fn): return True
248 def always(fn): return True
249 def never(fn): return False
249 def never(fn): return False
250
250
251 def expand_glob(pats):
251 def expand_glob(pats):
252 '''On Windows, expand the implicit globs in a list of patterns'''
252 '''On Windows, expand the implicit globs in a list of patterns'''
253 if os.name != 'nt':
253 if os.name != 'nt':
254 return list(pats)
254 return list(pats)
255 ret = []
255 ret = []
256 for p in pats:
256 for p in pats:
257 kind, name = patkind(p, None)
257 kind, name = patkind(p, None)
258 if kind is None:
258 if kind is None:
259 globbed = glob.glob(name)
259 globbed = glob.glob(name)
260 if globbed:
260 if globbed:
261 ret.extend(globbed)
261 ret.extend(globbed)
262 continue
262 continue
263 # if we couldn't expand the glob, just keep it around
263 # if we couldn't expand the glob, just keep it around
264 ret.append(p)
264 ret.append(p)
265 return ret
265 return ret
266
266
267 def patkind(name, dflt_pat='glob'):
267 def patkind(name, dflt_pat='glob'):
268 """Split a string into an optional pattern kind prefix and the
268 """Split a string into an optional pattern kind prefix and the
269 actual pattern."""
269 actual pattern."""
270 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
270 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
271 if name.startswith(prefix + ':'): return name.split(':', 1)
271 if name.startswith(prefix + ':'): return name.split(':', 1)
272 return dflt_pat, name
272 return dflt_pat, name
273
273
274 def globre(pat, head='^', tail='$'):
274 def globre(pat, head='^', tail='$'):
275 "convert a glob pattern into a regexp"
275 "convert a glob pattern into a regexp"
276 i, n = 0, len(pat)
276 i, n = 0, len(pat)
277 res = ''
277 res = ''
278 group = False
278 group = False
279 def peek(): return i < n and pat[i]
279 def peek(): return i < n and pat[i]
280 while i < n:
280 while i < n:
281 c = pat[i]
281 c = pat[i]
282 i = i+1
282 i = i+1
283 if c == '*':
283 if c == '*':
284 if peek() == '*':
284 if peek() == '*':
285 i += 1
285 i += 1
286 res += '.*'
286 res += '.*'
287 else:
287 else:
288 res += '[^/]*'
288 res += '[^/]*'
289 elif c == '?':
289 elif c == '?':
290 res += '.'
290 res += '.'
291 elif c == '[':
291 elif c == '[':
292 j = i
292 j = i
293 if j < n and pat[j] in '!]':
293 if j < n and pat[j] in '!]':
294 j += 1
294 j += 1
295 while j < n and pat[j] != ']':
295 while j < n and pat[j] != ']':
296 j += 1
296 j += 1
297 if j >= n:
297 if j >= n:
298 res += '\\['
298 res += '\\['
299 else:
299 else:
300 stuff = pat[i:j].replace('\\','\\\\')
300 stuff = pat[i:j].replace('\\','\\\\')
301 i = j + 1
301 i = j + 1
302 if stuff[0] == '!':
302 if stuff[0] == '!':
303 stuff = '^' + stuff[1:]
303 stuff = '^' + stuff[1:]
304 elif stuff[0] == '^':
304 elif stuff[0] == '^':
305 stuff = '\\' + stuff
305 stuff = '\\' + stuff
306 res = '%s[%s]' % (res, stuff)
306 res = '%s[%s]' % (res, stuff)
307 elif c == '{':
307 elif c == '{':
308 group = True
308 group = True
309 res += '(?:'
309 res += '(?:'
310 elif c == '}' and group:
310 elif c == '}' and group:
311 res += ')'
311 res += ')'
312 group = False
312 group = False
313 elif c == ',' and group:
313 elif c == ',' and group:
314 res += '|'
314 res += '|'
315 elif c == '\\':
315 elif c == '\\':
316 p = peek()
316 p = peek()
317 if p:
317 if p:
318 i += 1
318 i += 1
319 res += re.escape(p)
319 res += re.escape(p)
320 else:
320 else:
321 res += re.escape(c)
321 res += re.escape(c)
322 else:
322 else:
323 res += re.escape(c)
323 res += re.escape(c)
324 return head + res + tail
324 return head + res + tail
325
325
326 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
326 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
327
327
328 def pathto(n1, n2):
328 def pathto(n1, n2):
329 '''return the relative path from one place to another.
329 '''return the relative path from one place to another.
330 n1 should use os.sep to separate directories
330 n1 should use os.sep to separate directories
331 n2 should use "/" to separate directories
331 n2 should use "/" to separate directories
332 returns an os.sep-separated path.
332 returns an os.sep-separated path.
333 '''
333 '''
334 if not n1: return localpath(n2)
334 if not n1: return localpath(n2)
335 a, b = n1.split(os.sep), n2.split('/')
335 a, b = n1.split(os.sep), n2.split('/')
336 a.reverse()
336 a.reverse()
337 b.reverse()
337 b.reverse()
338 while a and b and a[-1] == b[-1]:
338 while a and b and a[-1] == b[-1]:
339 a.pop()
339 a.pop()
340 b.pop()
340 b.pop()
341 b.reverse()
341 b.reverse()
342 return os.sep.join((['..'] * len(a)) + b)
342 return os.sep.join((['..'] * len(a)) + b)
343
343
344 def canonpath(root, cwd, myname):
344 def canonpath(root, cwd, myname):
345 """return the canonical path of myname, given cwd and root"""
345 """return the canonical path of myname, given cwd and root"""
346 if root == os.sep:
346 if root == os.sep:
347 rootsep = os.sep
347 rootsep = os.sep
348 elif root.endswith(os.sep):
348 elif root.endswith(os.sep):
349 rootsep = root
349 rootsep = root
350 else:
350 else:
351 rootsep = root + os.sep
351 rootsep = root + os.sep
352 name = myname
352 name = myname
353 if not os.path.isabs(name):
353 if not os.path.isabs(name):
354 name = os.path.join(root, cwd, name)
354 name = os.path.join(root, cwd, name)
355 name = os.path.normpath(name)
355 name = os.path.normpath(name)
356 if name != rootsep and name.startswith(rootsep):
356 if name != rootsep and name.startswith(rootsep):
357 name = name[len(rootsep):]
357 name = name[len(rootsep):]
358 audit_path(name)
358 audit_path(name)
359 return pconvert(name)
359 return pconvert(name)
360 elif name == root:
360 elif name == root:
361 return ''
361 return ''
362 else:
362 else:
363 # Determine whether `name' is in the hierarchy at or beneath `root',
363 # Determine whether `name' is in the hierarchy at or beneath `root',
364 # by iterating name=dirname(name) until that causes no change (can't
364 # by iterating name=dirname(name) until that causes no change (can't
365 # check name == '/', because that doesn't work on windows). For each
365 # check name == '/', because that doesn't work on windows). For each
366 # `name', compare dev/inode numbers. If they match, the list `rel'
366 # `name', compare dev/inode numbers. If they match, the list `rel'
367 # holds the reversed list of components making up the relative file
367 # holds the reversed list of components making up the relative file
368 # name we want.
368 # name we want.
369 root_st = os.stat(root)
369 root_st = os.stat(root)
370 rel = []
370 rel = []
371 while True:
371 while True:
372 try:
372 try:
373 name_st = os.stat(name)
373 name_st = os.stat(name)
374 except OSError:
374 except OSError:
375 break
375 break
376 if samestat(name_st, root_st):
376 if samestat(name_st, root_st):
377 if not rel:
377 if not rel:
378 # name was actually the same as root (maybe a symlink)
378 # name was actually the same as root (maybe a symlink)
379 return ''
379 return ''
380 rel.reverse()
380 rel.reverse()
381 name = os.path.join(*rel)
381 name = os.path.join(*rel)
382 audit_path(name)
382 audit_path(name)
383 return pconvert(name)
383 return pconvert(name)
384 dirname, basename = os.path.split(name)
384 dirname, basename = os.path.split(name)
385 rel.append(basename)
385 rel.append(basename)
386 if dirname == name:
386 if dirname == name:
387 break
387 break
388 name = dirname
388 name = dirname
389
389
390 raise Abort('%s not under root' % myname)
390 raise Abort('%s not under root' % myname)
391
391
392 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
392 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
393 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
393 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
394
394
395 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='',
395 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='',
396 src=None, globbed=False):
396 src=None, globbed=False):
397 if not globbed:
397 if not globbed:
398 names = expand_glob(names)
398 names = expand_glob(names)
399 return _matcher(canonroot, cwd, names, inc, exc, head, 'relpath', src)
399 return _matcher(canonroot, cwd, names, inc, exc, head, 'relpath', src)
400
400
401 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
401 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
402 """build a function to match a set of file patterns
402 """build a function to match a set of file patterns
403
403
404 arguments:
404 arguments:
405 canonroot - the canonical root of the tree you're matching against
405 canonroot - the canonical root of the tree you're matching against
406 cwd - the current working directory, if relevant
406 cwd - the current working directory, if relevant
407 names - patterns to find
407 names - patterns to find
408 inc - patterns to include
408 inc - patterns to include
409 exc - patterns to exclude
409 exc - patterns to exclude
410 head - a regex to prepend to patterns to control whether a match is rooted
410 head - a regex to prepend to patterns to control whether a match is rooted
411
411
412 a pattern is one of:
412 a pattern is one of:
413 'glob:<rooted glob>'
413 'glob:<rooted glob>'
414 're:<rooted regexp>'
414 're:<rooted regexp>'
415 'path:<rooted path>'
415 'path:<rooted path>'
416 'relglob:<relative glob>'
416 'relglob:<relative glob>'
417 'relpath:<relative path>'
417 'relpath:<relative path>'
418 'relre:<relative regexp>'
418 'relre:<relative regexp>'
419 '<rooted path or regexp>'
419 '<rooted path or regexp>'
420
420
421 returns:
421 returns:
422 a 3-tuple containing
422 a 3-tuple containing
423 - list of explicit non-pattern names passed in
423 - list of explicit non-pattern names passed in
424 - a bool match(filename) function
424 - a bool match(filename) function
425 - a bool indicating if any patterns were passed in
425 - a bool indicating if any patterns were passed in
426
426
427 todo:
427 todo:
428 make head regex a rooted bool
428 make head regex a rooted bool
429 """
429 """
430
430
431 def contains_glob(name):
431 def contains_glob(name):
432 for c in name:
432 for c in name:
433 if c in _globchars: return True
433 if c in _globchars: return True
434 return False
434 return False
435
435
436 def regex(kind, name, tail):
436 def regex(kind, name, tail):
437 '''convert a pattern into a regular expression'''
437 '''convert a pattern into a regular expression'''
438 if kind == 're':
438 if kind == 're':
439 return name
439 return name
440 elif kind == 'path':
440 elif kind == 'path':
441 return '^' + re.escape(name) + '(?:/|$)'
441 return '^' + re.escape(name) + '(?:/|$)'
442 elif kind == 'relglob':
442 elif kind == 'relglob':
443 return head + globre(name, '(?:|.*/)', tail)
443 return head + globre(name, '(?:|.*/)', tail)
444 elif kind == 'relpath':
444 elif kind == 'relpath':
445 return head + re.escape(name) + tail
445 return head + re.escape(name) + tail
446 elif kind == 'relre':
446 elif kind == 'relre':
447 if name.startswith('^'):
447 if name.startswith('^'):
448 return name
448 return name
449 return '.*' + name
449 return '.*' + name
450 return head + globre(name, '', tail)
450 return head + globre(name, '', tail)
451
451
452 def matchfn(pats, tail):
452 def matchfn(pats, tail):
453 """build a matching function from a set of patterns"""
453 """build a matching function from a set of patterns"""
454 if not pats:
454 if not pats:
455 return
455 return
456 matches = []
456 matches = []
457 for k, p in pats:
457 for k, p in pats:
458 try:
458 try:
459 pat = '(?:%s)' % regex(k, p, tail)
459 pat = '(?:%s)' % regex(k, p, tail)
460 matches.append(re.compile(pat).match)
460 matches.append(re.compile(pat).match)
461 except re.error:
461 except re.error:
462 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
462 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
463 else: raise Abort("invalid pattern (%s): %s" % (k, p))
463 else: raise Abort("invalid pattern (%s): %s" % (k, p))
464
464
465 def buildfn(text):
465 def buildfn(text):
466 for m in matches:
466 for m in matches:
467 r = m(text)
467 r = m(text)
468 if r:
468 if r:
469 return r
469 return r
470
470
471 return buildfn
471 return buildfn
472
472
473 def globprefix(pat):
473 def globprefix(pat):
474 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
474 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
475 root = []
475 root = []
476 for p in pat.split(os.sep):
476 for p in pat.split(os.sep):
477 if contains_glob(p): break
477 if contains_glob(p): break
478 root.append(p)
478 root.append(p)
479 return '/'.join(root)
479 return '/'.join(root)
480
480
481 pats = []
481 pats = []
482 files = []
482 files = []
483 roots = []
483 roots = []
484 for kind, name in [patkind(p, dflt_pat) for p in names]:
484 for kind, name in [patkind(p, dflt_pat) for p in names]:
485 if kind in ('glob', 'relpath'):
485 if kind in ('glob', 'relpath'):
486 name = canonpath(canonroot, cwd, name)
486 name = canonpath(canonroot, cwd, name)
487 if name == '':
487 if name == '':
488 kind, name = 'glob', '**'
488 kind, name = 'glob', '**'
489 if kind in ('glob', 'path', 're'):
489 if kind in ('glob', 'path', 're'):
490 pats.append((kind, name))
490 pats.append((kind, name))
491 if kind == 'glob':
491 if kind == 'glob':
492 root = globprefix(name)
492 root = globprefix(name)
493 if root: roots.append(root)
493 if root: roots.append(root)
494 elif kind == 'relpath':
494 elif kind == 'relpath':
495 files.append((kind, name))
495 files.append((kind, name))
496 roots.append(name)
496 roots.append(name)
497
497
498 patmatch = matchfn(pats, '$') or always
498 patmatch = matchfn(pats, '$') or always
499 filematch = matchfn(files, '(?:/|$)') or always
499 filematch = matchfn(files, '(?:/|$)') or always
500 incmatch = always
500 incmatch = always
501 if inc:
501 if inc:
502 inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
502 inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
503 incmatch = matchfn(inckinds, '(?:/|$)')
503 incmatch = matchfn(inckinds, '(?:/|$)')
504 excmatch = lambda fn: False
504 excmatch = lambda fn: False
505 if exc:
505 if exc:
506 exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
506 exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
507 excmatch = matchfn(exckinds, '(?:/|$)')
507 excmatch = matchfn(exckinds, '(?:/|$)')
508
508
509 return (roots,
509 return (roots,
510 lambda fn: (incmatch(fn) and not excmatch(fn) and
510 lambda fn: (incmatch(fn) and not excmatch(fn) and
511 (fn.endswith('/') or
511 (fn.endswith('/') or
512 (not pats and not files) or
512 (not pats and not files) or
513 (pats and patmatch(fn)) or
513 (pats and patmatch(fn)) or
514 (files and filematch(fn)))),
514 (files and filematch(fn)))),
515 (inc or exc or (pats and pats != [('glob', '**')])) and True)
515 (inc or exc or (pats and pats != [('glob', '**')])) and True)
516
516
517 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
517 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
518 '''enhanced shell command execution.
518 '''enhanced shell command execution.
519 run with environment maybe modified, maybe in different dir.
519 run with environment maybe modified, maybe in different dir.
520
520
521 if command fails and onerr is None, return status. if ui object,
521 if command fails and onerr is None, return status. if ui object,
522 print error message and return status, else raise onerr object as
522 print error message and return status, else raise onerr object as
523 exception.'''
523 exception.'''
524 def py2shell(val):
524 def py2shell(val):
525 'convert python object into string that is useful to shell'
525 'convert python object into string that is useful to shell'
526 if val in (None, False):
526 if val in (None, False):
527 return '0'
527 return '0'
528 if val == True:
528 if val == True:
529 return '1'
529 return '1'
530 return str(val)
530 return str(val)
531 oldenv = {}
531 oldenv = {}
532 for k in environ:
532 for k in environ:
533 oldenv[k] = os.environ.get(k)
533 oldenv[k] = os.environ.get(k)
534 if cwd is not None:
534 if cwd is not None:
535 oldcwd = os.getcwd()
535 oldcwd = os.getcwd()
536 origcmd = cmd
536 origcmd = cmd
537 if os.name == 'nt':
537 if os.name == 'nt':
538 cmd = '"%s"' % cmd
538 cmd = '"%s"' % cmd
539 try:
539 try:
540 for k, v in environ.iteritems():
540 for k, v in environ.iteritems():
541 os.environ[k] = py2shell(v)
541 os.environ[k] = py2shell(v)
542 if cwd is not None and oldcwd != cwd:
542 if cwd is not None and oldcwd != cwd:
543 os.chdir(cwd)
543 os.chdir(cwd)
544 rc = os.system(cmd)
544 rc = os.system(cmd)
545 if rc and onerr:
545 if rc and onerr:
546 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
546 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
547 explain_exit(rc)[0])
547 explain_exit(rc)[0])
548 if errprefix:
548 if errprefix:
549 errmsg = '%s: %s' % (errprefix, errmsg)
549 errmsg = '%s: %s' % (errprefix, errmsg)
550 try:
550 try:
551 onerr.warn(errmsg + '\n')
551 onerr.warn(errmsg + '\n')
552 except AttributeError:
552 except AttributeError:
553 raise onerr(errmsg)
553 raise onerr(errmsg)
554 return rc
554 return rc
555 finally:
555 finally:
556 for k, v in oldenv.iteritems():
556 for k, v in oldenv.iteritems():
557 if v is None:
557 if v is None:
558 del os.environ[k]
558 del os.environ[k]
559 else:
559 else:
560 os.environ[k] = v
560 os.environ[k] = v
561 if cwd is not None and oldcwd != cwd:
561 if cwd is not None and oldcwd != cwd:
562 os.chdir(oldcwd)
562 os.chdir(oldcwd)
563
563
564 def rename(src, dst):
564 def rename(src, dst):
565 """forcibly rename a file"""
565 """forcibly rename a file"""
566 try:
566 try:
567 os.rename(src, dst)
567 os.rename(src, dst)
568 except OSError, err:
568 except OSError, err:
569 # on windows, rename to existing file is not allowed, so we
569 # on windows, rename to existing file is not allowed, so we
570 # must delete destination first. but if file is open, unlink
570 # must delete destination first. but if file is open, unlink
571 # schedules it for delete but does not delete it. rename
571 # schedules it for delete but does not delete it. rename
572 # happens immediately even for open files, so we create
572 # happens immediately even for open files, so we create
573 # temporary file, delete it, rename destination to that name,
573 # temporary file, delete it, rename destination to that name,
574 # then delete that. then rename is safe to do.
574 # then delete that. then rename is safe to do.
575 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
575 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
576 os.close(fd)
576 os.close(fd)
577 os.unlink(temp)
577 os.unlink(temp)
578 os.rename(dst, temp)
578 os.rename(dst, temp)
579 os.unlink(temp)
579 os.unlink(temp)
580 os.rename(src, dst)
580 os.rename(src, dst)
581
581
582 def unlink(f):
582 def unlink(f):
583 """unlink and remove the directory if it is empty"""
583 """unlink and remove the directory if it is empty"""
584 os.unlink(f)
584 os.unlink(f)
585 # try removing directories that might now be empty
585 # try removing directories that might now be empty
586 try:
586 try:
587 os.removedirs(os.path.dirname(f))
587 os.removedirs(os.path.dirname(f))
588 except OSError:
588 except OSError:
589 pass
589 pass
590
590
591 def copyfile(src, dest):
591 def copyfile(src, dest):
592 "copy a file, preserving mode"
592 "copy a file, preserving mode"
593 try:
593 try:
594 shutil.copyfile(src, dest)
594 shutil.copyfile(src, dest)
595 shutil.copymode(src, dest)
595 shutil.copymode(src, dest)
596 except shutil.Error, inst:
596 except shutil.Error, inst:
597 raise Abort(str(inst))
597 raise Abort(str(inst))
598
598
599 def copyfiles(src, dst, hardlink=None):
599 def copyfiles(src, dst, hardlink=None):
600 """Copy a directory tree using hardlinks if possible"""
600 """Copy a directory tree using hardlinks if possible"""
601
601
602 if hardlink is None:
602 if hardlink is None:
603 hardlink = (os.stat(src).st_dev ==
603 hardlink = (os.stat(src).st_dev ==
604 os.stat(os.path.dirname(dst)).st_dev)
604 os.stat(os.path.dirname(dst)).st_dev)
605
605
606 if os.path.isdir(src):
606 if os.path.isdir(src):
607 os.mkdir(dst)
607 os.mkdir(dst)
608 for name in os.listdir(src):
608 for name in os.listdir(src):
609 srcname = os.path.join(src, name)
609 srcname = os.path.join(src, name)
610 dstname = os.path.join(dst, name)
610 dstname = os.path.join(dst, name)
611 copyfiles(srcname, dstname, hardlink)
611 copyfiles(srcname, dstname, hardlink)
612 else:
612 else:
613 if hardlink:
613 if hardlink:
614 try:
614 try:
615 os_link(src, dst)
615 os_link(src, dst)
616 except (IOError, OSError):
616 except (IOError, OSError):
617 hardlink = False
617 hardlink = False
618 shutil.copy(src, dst)
618 shutil.copy(src, dst)
619 else:
619 else:
620 shutil.copy(src, dst)
620 shutil.copy(src, dst)
621
621
622 def audit_path(path):
622 def audit_path(path):
623 """Abort if path contains dangerous components"""
623 """Abort if path contains dangerous components"""
624 parts = os.path.normcase(path).split(os.sep)
624 parts = os.path.normcase(path).split(os.sep)
625 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
625 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
626 or os.pardir in parts):
626 or os.pardir in parts):
627 raise Abort(_("path contains illegal component: %s\n") % path)
627 raise Abort(_("path contains illegal component: %s\n") % path)
628
628
629 def _makelock_file(info, pathname):
629 def _makelock_file(info, pathname):
630 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
630 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
631 os.write(ld, info)
631 os.write(ld, info)
632 os.close(ld)
632 os.close(ld)
633
633
634 def _readlock_file(pathname):
634 def _readlock_file(pathname):
635 return posixfile(pathname).read()
635 return posixfile(pathname).read()
636
636
637 def nlinks(pathname):
637 def nlinks(pathname):
638 """Return number of hardlinks for the given file."""
638 """Return number of hardlinks for the given file."""
639 return os.lstat(pathname).st_nlink
639 return os.lstat(pathname).st_nlink
640
640
641 if hasattr(os, 'link'):
641 if hasattr(os, 'link'):
642 os_link = os.link
642 os_link = os.link
643 else:
643 else:
644 def os_link(src, dst):
644 def os_link(src, dst):
645 raise OSError(0, _("Hardlinks not supported"))
645 raise OSError(0, _("Hardlinks not supported"))
646
646
647 def fstat(fp):
647 def fstat(fp):
648 '''stat file object that may not have fileno method.'''
648 '''stat file object that may not have fileno method.'''
649 try:
649 try:
650 return os.fstat(fp.fileno())
650 return os.fstat(fp.fileno())
651 except AttributeError:
651 except AttributeError:
652 return os.stat(fp.name)
652 return os.stat(fp.name)
653
653
654 posixfile = file
654 posixfile = file
655
655
656 def is_win_9x():
656 def is_win_9x():
657 '''return true if run on windows 95, 98 or me.'''
657 '''return true if run on windows 95, 98 or me.'''
658 try:
658 try:
659 return sys.getwindowsversion()[3] == 1
659 return sys.getwindowsversion()[3] == 1
660 except AttributeError:
660 except AttributeError:
661 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
661 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
662
662
663 getuser_fallback = None
663 getuser_fallback = None
664
664
665 def getuser():
665 def getuser():
666 '''return name of current user'''
666 '''return name of current user'''
667 try:
667 try:
668 return getpass.getuser()
668 return getpass.getuser()
669 except ImportError:
669 except ImportError:
670 # import of pwd will fail on windows - try fallback
670 # import of pwd will fail on windows - try fallback
671 if getuser_fallback:
671 if getuser_fallback:
672 return getuser_fallback()
672 return getuser_fallback()
673 # raised if win32api not available
673 # raised if win32api not available
674 raise Abort(_('user name not available - set USERNAME '
674 raise Abort(_('user name not available - set USERNAME '
675 'environment variable'))
675 'environment variable'))
676
676
677 def username(uid=None):
677 def username(uid=None):
678 """Return the name of the user with the given uid.
678 """Return the name of the user with the given uid.
679
679
680 If uid is None, return the name of the current user."""
680 If uid is None, return the name of the current user."""
681 try:
681 try:
682 import pwd
682 import pwd
683 if uid is None:
683 if uid is None:
684 uid = os.getuid()
684 uid = os.getuid()
685 try:
685 try:
686 return pwd.getpwuid(uid)[0]
686 return pwd.getpwuid(uid)[0]
687 except KeyError:
687 except KeyError:
688 return str(uid)
688 return str(uid)
689 except ImportError:
689 except ImportError:
690 return None
690 return None
691
691
692 def groupname(gid=None):
692 def groupname(gid=None):
693 """Return the name of the group with the given gid.
693 """Return the name of the group with the given gid.
694
694
695 If gid is None, return the name of the current group."""
695 If gid is None, return the name of the current group."""
696 try:
696 try:
697 import grp
697 import grp
698 if gid is None:
698 if gid is None:
699 gid = os.getgid()
699 gid = os.getgid()
700 try:
700 try:
701 return grp.getgrgid(gid)[0]
701 return grp.getgrgid(gid)[0]
702 except KeyError:
702 except KeyError:
703 return str(gid)
703 return str(gid)
704 except ImportError:
704 except ImportError:
705 return None
705 return None
706
706
707 # File system features
707 # File system features
708
708
709 def checkfolding(path):
709 def checkfolding(path):
710 """
710 """
711 Check whether the given path is on a case-sensitive filesystem
711 Check whether the given path is on a case-sensitive filesystem
712
712
713 Requires a path (like /foo/.hg) ending with a foldable final
713 Requires a path (like /foo/.hg) ending with a foldable final
714 directory component.
714 directory component.
715 """
715 """
716 s1 = os.stat(path)
716 s1 = os.stat(path)
717 d, b = os.path.split(path)
717 d, b = os.path.split(path)
718 p2 = os.path.join(d, b.upper())
718 p2 = os.path.join(d, b.upper())
719 if path == p2:
719 if path == p2:
720 p2 = os.path.join(d, b.lower())
720 p2 = os.path.join(d, b.lower())
721 try:
721 try:
722 s2 = os.stat(p2)
722 s2 = os.stat(p2)
723 if s2 == s1:
723 if s2 == s1:
724 return False
724 return False
725 return True
725 return True
726 except:
726 except:
727 return True
727 return True
728
728
729 def checkexec(path):
729 def checkexec(path):
730 """
730 """
731 Check whether the given path is on a filesystem with UNIX-like exec flags
731 Check whether the given path is on a filesystem with UNIX-like exec flags
732
732
733 Requires a directory (like /foo/.hg)
733 Requires a directory (like /foo/.hg)
734 """
734 """
735 fh, fn = tempfile.mkstemp("", "", path)
735 fh, fn = tempfile.mkstemp("", "", path)
736 os.close(fh)
736 os.close(fh)
737 m = os.stat(fn).st_mode
737 m = os.stat(fn).st_mode
738 os.chmod(fn, m ^ 0111)
738 os.chmod(fn, m ^ 0111)
739 r = (os.stat(fn).st_mode != m)
739 r = (os.stat(fn).st_mode != m)
740 os.unlink(fn)
740 os.unlink(fn)
741 return r
741 return r
742
742
743 def execfunc(path, fallback):
743 def execfunc(path, fallback):
744 '''return an is_exec() function with default to fallback'''
744 '''return an is_exec() function with default to fallback'''
745 if checkexec(path):
745 if checkexec(path):
746 return lambda x: is_exec(os.path.join(path, x))
746 return lambda x: is_exec(os.path.join(path, x))
747 return fallback
747 return fallback
748
748
749 def checklink(path):
749 def checklink(path):
750 """check whether the given path is on a symlink-capable filesystem"""
750 """check whether the given path is on a symlink-capable filesystem"""
751 # mktemp is not racy because symlink creation will fail if the
751 # mktemp is not racy because symlink creation will fail if the
752 # file already exists
752 # file already exists
753 name = tempfile.mktemp(dir=path)
753 name = tempfile.mktemp(dir=path)
754 try:
754 try:
755 os.symlink(".", name)
755 os.symlink(".", name)
756 os.unlink(name)
756 os.unlink(name)
757 return True
757 return True
758 except (OSError, AttributeError):
758 except (OSError, AttributeError):
759 return False
759 return False
760
760
761 def linkfunc(path, fallback):
761 def linkfunc(path, fallback):
762 '''return an is_link() function with default to fallback'''
762 '''return an is_link() function with default to fallback'''
763 if checklink(path):
763 if checklink(path):
764 return lambda x: is_link(os.path.join(path, x))
764 return lambda x: is_link(os.path.join(path, x))
765 return fallback
765 return fallback
766
766
767 # Platform specific variants
767 # Platform specific variants
768 if os.name == 'nt':
768 if os.name == 'nt':
769 import msvcrt
769 import msvcrt
770 nulldev = 'NUL:'
770 nulldev = 'NUL:'
771
771
772 class winstdout:
772 class winstdout:
773 '''stdout on windows misbehaves if sent through a pipe'''
773 '''stdout on windows misbehaves if sent through a pipe'''
774
774
775 def __init__(self, fp):
775 def __init__(self, fp):
776 self.fp = fp
776 self.fp = fp
777
777
778 def __getattr__(self, key):
778 def __getattr__(self, key):
779 return getattr(self.fp, key)
779 return getattr(self.fp, key)
780
780
781 def close(self):
781 def close(self):
782 try:
782 try:
783 self.fp.close()
783 self.fp.close()
784 except: pass
784 except: pass
785
785
786 def write(self, s):
786 def write(self, s):
787 try:
787 try:
788 return self.fp.write(s)
788 return self.fp.write(s)
789 except IOError, inst:
789 except IOError, inst:
790 if inst.errno != 0: raise
790 if inst.errno != 0: raise
791 self.close()
791 self.close()
792 raise IOError(errno.EPIPE, 'Broken pipe')
792 raise IOError(errno.EPIPE, 'Broken pipe')
793
794 def flush(self):
795 try:
796 return self.fp.flush()
797 except IOError, inst:
798 if inst.errno != errno.EINVAL: raise
799 self.close()
800 raise IOError(errno.EPIPE, 'Broken pipe')
793
801
794 sys.stdout = winstdout(sys.stdout)
802 sys.stdout = winstdout(sys.stdout)
795
803
796 def system_rcpath():
804 def system_rcpath():
797 try:
805 try:
798 return system_rcpath_win32()
806 return system_rcpath_win32()
799 except:
807 except:
800 return [r'c:\mercurial\mercurial.ini']
808 return [r'c:\mercurial\mercurial.ini']
801
809
802 def user_rcpath():
810 def user_rcpath():
803 '''return os-specific hgrc search path to the user dir'''
811 '''return os-specific hgrc search path to the user dir'''
804 try:
812 try:
805 userrc = user_rcpath_win32()
813 userrc = user_rcpath_win32()
806 except:
814 except:
807 userrc = os.path.join(os.path.expanduser('~'), 'mercurial.ini')
815 userrc = os.path.join(os.path.expanduser('~'), 'mercurial.ini')
808 path = [userrc]
816 path = [userrc]
809 userprofile = os.environ.get('USERPROFILE')
817 userprofile = os.environ.get('USERPROFILE')
810 if userprofile:
818 if userprofile:
811 path.append(os.path.join(userprofile, 'mercurial.ini'))
819 path.append(os.path.join(userprofile, 'mercurial.ini'))
812 return path
820 return path
813
821
814 def parse_patch_output(output_line):
822 def parse_patch_output(output_line):
815 """parses the output produced by patch and returns the file name"""
823 """parses the output produced by patch and returns the file name"""
816 pf = output_line[14:]
824 pf = output_line[14:]
817 if pf[0] == '`':
825 if pf[0] == '`':
818 pf = pf[1:-1] # Remove the quotes
826 pf = pf[1:-1] # Remove the quotes
819 return pf
827 return pf
820
828
821 def testpid(pid):
829 def testpid(pid):
822 '''return False if pid dead, True if running or not known'''
830 '''return False if pid dead, True if running or not known'''
823 return True
831 return True
824
832
825 def set_exec(f, mode):
833 def set_exec(f, mode):
826 pass
834 pass
827
835
828 def set_link(f, mode):
836 def set_link(f, mode):
829 pass
837 pass
830
838
831 def set_binary(fd):
839 def set_binary(fd):
832 msvcrt.setmode(fd.fileno(), os.O_BINARY)
840 msvcrt.setmode(fd.fileno(), os.O_BINARY)
833
841
834 def pconvert(path):
842 def pconvert(path):
835 return path.replace("\\", "/")
843 return path.replace("\\", "/")
836
844
837 def localpath(path):
845 def localpath(path):
838 return path.replace('/', '\\')
846 return path.replace('/', '\\')
839
847
840 def normpath(path):
848 def normpath(path):
841 return pconvert(os.path.normpath(path))
849 return pconvert(os.path.normpath(path))
842
850
843 makelock = _makelock_file
851 makelock = _makelock_file
844 readlock = _readlock_file
852 readlock = _readlock_file
845
853
846 def samestat(s1, s2):
854 def samestat(s1, s2):
847 return False
855 return False
848
856
849 # A sequence of backslashes is special iff it precedes a double quote:
857 # A sequence of backslashes is special iff it precedes a double quote:
850 # - if there's an even number of backslashes, the double quote is not
858 # - if there's an even number of backslashes, the double quote is not
851 # quoted (i.e. it ends the quoted region)
859 # quoted (i.e. it ends the quoted region)
852 # - if there's an odd number of backslashes, the double quote is quoted
860 # - if there's an odd number of backslashes, the double quote is quoted
853 # - in both cases, every pair of backslashes is unquoted into a single
861 # - in both cases, every pair of backslashes is unquoted into a single
854 # backslash
862 # backslash
855 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
863 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
856 # So, to quote a string, we must surround it in double quotes, double
864 # So, to quote a string, we must surround it in double quotes, double
857 # the number of backslashes that preceed double quotes and add another
865 # the number of backslashes that preceed double quotes and add another
858 # backslash before every double quote (being careful with the double
866 # backslash before every double quote (being careful with the double
859 # quote we've appended to the end)
867 # quote we've appended to the end)
860 _quotere = None
868 _quotere = None
861 def shellquote(s):
869 def shellquote(s):
862 global _quotere
870 global _quotere
863 if _quotere is None:
871 if _quotere is None:
864 _quotere = re.compile(r'(\\*)("|\\$)')
872 _quotere = re.compile(r'(\\*)("|\\$)')
865 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
873 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
866
874
867 def explain_exit(code):
875 def explain_exit(code):
868 return _("exited with status %d") % code, code
876 return _("exited with status %d") % code, code
869
877
870 # if you change this stub into a real check, please try to implement the
878 # if you change this stub into a real check, please try to implement the
871 # username and groupname functions above, too.
879 # username and groupname functions above, too.
872 def isowner(fp, st=None):
880 def isowner(fp, st=None):
873 return True
881 return True
874
882
875 try:
883 try:
876 # override functions with win32 versions if possible
884 # override functions with win32 versions if possible
877 from util_win32 import *
885 from util_win32 import *
878 if not is_win_9x():
886 if not is_win_9x():
879 posixfile = posixfile_nt
887 posixfile = posixfile_nt
880 except ImportError:
888 except ImportError:
881 pass
889 pass
882
890
883 else:
891 else:
884 nulldev = '/dev/null'
892 nulldev = '/dev/null'
885 _umask = os.umask(0)
893 _umask = os.umask(0)
886 os.umask(_umask)
894 os.umask(_umask)
887
895
888 def rcfiles(path):
896 def rcfiles(path):
889 rcs = [os.path.join(path, 'hgrc')]
897 rcs = [os.path.join(path, 'hgrc')]
890 rcdir = os.path.join(path, 'hgrc.d')
898 rcdir = os.path.join(path, 'hgrc.d')
891 try:
899 try:
892 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
900 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
893 if f.endswith(".rc")])
901 if f.endswith(".rc")])
894 except OSError:
902 except OSError:
895 pass
903 pass
896 return rcs
904 return rcs
897
905
898 def system_rcpath():
906 def system_rcpath():
899 path = []
907 path = []
900 # old mod_python does not set sys.argv
908 # old mod_python does not set sys.argv
901 if len(getattr(sys, 'argv', [])) > 0:
909 if len(getattr(sys, 'argv', [])) > 0:
902 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
910 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
903 '/../etc/mercurial'))
911 '/../etc/mercurial'))
904 path.extend(rcfiles('/etc/mercurial'))
912 path.extend(rcfiles('/etc/mercurial'))
905 return path
913 return path
906
914
907 def user_rcpath():
915 def user_rcpath():
908 return [os.path.expanduser('~/.hgrc')]
916 return [os.path.expanduser('~/.hgrc')]
909
917
910 def parse_patch_output(output_line):
918 def parse_patch_output(output_line):
911 """parses the output produced by patch and returns the file name"""
919 """parses the output produced by patch and returns the file name"""
912 pf = output_line[14:]
920 pf = output_line[14:]
913 if pf.startswith("'") and pf.endswith("'") and " " in pf:
921 if pf.startswith("'") and pf.endswith("'") and " " in pf:
914 pf = pf[1:-1] # Remove the quotes
922 pf = pf[1:-1] # Remove the quotes
915 return pf
923 return pf
916
924
917 def is_exec(f):
925 def is_exec(f):
918 """check whether a file is executable"""
926 """check whether a file is executable"""
919 return (os.lstat(f).st_mode & 0100 != 0)
927 return (os.lstat(f).st_mode & 0100 != 0)
920
928
921 def set_exec(f, mode):
929 def set_exec(f, mode):
922 s = os.lstat(f).st_mode
930 s = os.lstat(f).st_mode
923 if (s & 0100 != 0) == mode:
931 if (s & 0100 != 0) == mode:
924 return
932 return
925 if mode:
933 if mode:
926 # Turn on +x for every +r bit when making a file executable
934 # Turn on +x for every +r bit when making a file executable
927 # and obey umask.
935 # and obey umask.
928 os.chmod(f, s | (s & 0444) >> 2 & ~_umask)
936 os.chmod(f, s | (s & 0444) >> 2 & ~_umask)
929 else:
937 else:
930 os.chmod(f, s & 0666)
938 os.chmod(f, s & 0666)
931
939
932 def is_link(f):
940 def is_link(f):
933 """check whether a file is a symlink"""
941 """check whether a file is a symlink"""
934 return (os.lstat(f).st_mode & 0120000 == 0120000)
942 return (os.lstat(f).st_mode & 0120000 == 0120000)
935
943
936 def set_link(f, mode):
944 def set_link(f, mode):
937 """make a file a symbolic link/regular file
945 """make a file a symbolic link/regular file
938
946
939 if a file is changed to a link, its contents become the link data
947 if a file is changed to a link, its contents become the link data
940 if a link is changed to a file, its link data become its contents
948 if a link is changed to a file, its link data become its contents
941 """
949 """
942
950
943 m = is_link(f)
951 m = is_link(f)
944 if m == bool(mode):
952 if m == bool(mode):
945 return
953 return
946
954
947 if mode: # switch file to link
955 if mode: # switch file to link
948 data = file(f).read()
956 data = file(f).read()
949 os.unlink(f)
957 os.unlink(f)
950 os.symlink(data, f)
958 os.symlink(data, f)
951 else:
959 else:
952 data = os.readlink(f)
960 data = os.readlink(f)
953 os.unlink(f)
961 os.unlink(f)
954 file(f, "w").write(data)
962 file(f, "w").write(data)
955
963
956 def set_binary(fd):
964 def set_binary(fd):
957 pass
965 pass
958
966
959 def pconvert(path):
967 def pconvert(path):
960 return path
968 return path
961
969
962 def localpath(path):
970 def localpath(path):
963 return path
971 return path
964
972
965 normpath = os.path.normpath
973 normpath = os.path.normpath
966 samestat = os.path.samestat
974 samestat = os.path.samestat
967
975
968 def makelock(info, pathname):
976 def makelock(info, pathname):
969 try:
977 try:
970 os.symlink(info, pathname)
978 os.symlink(info, pathname)
971 except OSError, why:
979 except OSError, why:
972 if why.errno == errno.EEXIST:
980 if why.errno == errno.EEXIST:
973 raise
981 raise
974 else:
982 else:
975 _makelock_file(info, pathname)
983 _makelock_file(info, pathname)
976
984
977 def readlock(pathname):
985 def readlock(pathname):
978 try:
986 try:
979 return os.readlink(pathname)
987 return os.readlink(pathname)
980 except OSError, why:
988 except OSError, why:
981 if why.errno == errno.EINVAL:
989 if why.errno == errno.EINVAL:
982 return _readlock_file(pathname)
990 return _readlock_file(pathname)
983 else:
991 else:
984 raise
992 raise
985
993
986 def shellquote(s):
994 def shellquote(s):
987 return "'%s'" % s.replace("'", "'\\''")
995 return "'%s'" % s.replace("'", "'\\''")
988
996
989 def testpid(pid):
997 def testpid(pid):
990 '''return False if pid dead, True if running or not sure'''
998 '''return False if pid dead, True if running or not sure'''
991 try:
999 try:
992 os.kill(pid, 0)
1000 os.kill(pid, 0)
993 return True
1001 return True
994 except OSError, inst:
1002 except OSError, inst:
995 return inst.errno != errno.ESRCH
1003 return inst.errno != errno.ESRCH
996
1004
997 def explain_exit(code):
1005 def explain_exit(code):
998 """return a 2-tuple (desc, code) describing a process's status"""
1006 """return a 2-tuple (desc, code) describing a process's status"""
999 if os.WIFEXITED(code):
1007 if os.WIFEXITED(code):
1000 val = os.WEXITSTATUS(code)
1008 val = os.WEXITSTATUS(code)
1001 return _("exited with status %d") % val, val
1009 return _("exited with status %d") % val, val
1002 elif os.WIFSIGNALED(code):
1010 elif os.WIFSIGNALED(code):
1003 val = os.WTERMSIG(code)
1011 val = os.WTERMSIG(code)
1004 return _("killed by signal %d") % val, val
1012 return _("killed by signal %d") % val, val
1005 elif os.WIFSTOPPED(code):
1013 elif os.WIFSTOPPED(code):
1006 val = os.WSTOPSIG(code)
1014 val = os.WSTOPSIG(code)
1007 return _("stopped by signal %d") % val, val
1015 return _("stopped by signal %d") % val, val
1008 raise ValueError(_("invalid exit code"))
1016 raise ValueError(_("invalid exit code"))
1009
1017
1010 def isowner(fp, st=None):
1018 def isowner(fp, st=None):
1011 """Return True if the file object f belongs to the current user.
1019 """Return True if the file object f belongs to the current user.
1012
1020
1013 The return value of a util.fstat(f) may be passed as the st argument.
1021 The return value of a util.fstat(f) may be passed as the st argument.
1014 """
1022 """
1015 if st is None:
1023 if st is None:
1016 st = fstat(fp)
1024 st = fstat(fp)
1017 return st.st_uid == os.getuid()
1025 return st.st_uid == os.getuid()
1018
1026
1019 def _buildencodefun():
1027 def _buildencodefun():
1020 e = '_'
1028 e = '_'
1021 win_reserved = [ord(x) for x in '\\:*?"<>|']
1029 win_reserved = [ord(x) for x in '\\:*?"<>|']
1022 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
1030 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
1023 for x in (range(32) + range(126, 256) + win_reserved):
1031 for x in (range(32) + range(126, 256) + win_reserved):
1024 cmap[chr(x)] = "~%02x" % x
1032 cmap[chr(x)] = "~%02x" % x
1025 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
1033 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
1026 cmap[chr(x)] = e + chr(x).lower()
1034 cmap[chr(x)] = e + chr(x).lower()
1027 dmap = {}
1035 dmap = {}
1028 for k, v in cmap.iteritems():
1036 for k, v in cmap.iteritems():
1029 dmap[v] = k
1037 dmap[v] = k
1030 def decode(s):
1038 def decode(s):
1031 i = 0
1039 i = 0
1032 while i < len(s):
1040 while i < len(s):
1033 for l in xrange(1, 4):
1041 for l in xrange(1, 4):
1034 try:
1042 try:
1035 yield dmap[s[i:i+l]]
1043 yield dmap[s[i:i+l]]
1036 i += l
1044 i += l
1037 break
1045 break
1038 except KeyError:
1046 except KeyError:
1039 pass
1047 pass
1040 else:
1048 else:
1041 raise KeyError
1049 raise KeyError
1042 return (lambda s: "".join([cmap[c] for c in s]),
1050 return (lambda s: "".join([cmap[c] for c in s]),
1043 lambda s: "".join(list(decode(s))))
1051 lambda s: "".join(list(decode(s))))
1044
1052
1045 encodefilename, decodefilename = _buildencodefun()
1053 encodefilename, decodefilename = _buildencodefun()
1046
1054
1047 def encodedopener(openerfn, fn):
1055 def encodedopener(openerfn, fn):
1048 def o(path, *args, **kw):
1056 def o(path, *args, **kw):
1049 return openerfn(fn(path), *args, **kw)
1057 return openerfn(fn(path), *args, **kw)
1050 return o
1058 return o
1051
1059
1052 def opener(base, audit=True):
1060 def opener(base, audit=True):
1053 """
1061 """
1054 return a function that opens files relative to base
1062 return a function that opens files relative to base
1055
1063
1056 this function is used to hide the details of COW semantics and
1064 this function is used to hide the details of COW semantics and
1057 remote file access from higher level code.
1065 remote file access from higher level code.
1058 """
1066 """
1059 p = base
1067 p = base
1060 audit_p = audit
1068 audit_p = audit
1061
1069
1062 def mktempcopy(name):
1070 def mktempcopy(name):
1063 d, fn = os.path.split(name)
1071 d, fn = os.path.split(name)
1064 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1072 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1065 os.close(fd)
1073 os.close(fd)
1066 ofp = posixfile(temp, "wb")
1074 ofp = posixfile(temp, "wb")
1067 try:
1075 try:
1068 try:
1076 try:
1069 ifp = posixfile(name, "rb")
1077 ifp = posixfile(name, "rb")
1070 except IOError, inst:
1078 except IOError, inst:
1071 if not getattr(inst, 'filename', None):
1079 if not getattr(inst, 'filename', None):
1072 inst.filename = name
1080 inst.filename = name
1073 raise
1081 raise
1074 for chunk in filechunkiter(ifp):
1082 for chunk in filechunkiter(ifp):
1075 ofp.write(chunk)
1083 ofp.write(chunk)
1076 ifp.close()
1084 ifp.close()
1077 ofp.close()
1085 ofp.close()
1078 except:
1086 except:
1079 try: os.unlink(temp)
1087 try: os.unlink(temp)
1080 except: pass
1088 except: pass
1081 raise
1089 raise
1082 st = os.lstat(name)
1090 st = os.lstat(name)
1083 os.chmod(temp, st.st_mode)
1091 os.chmod(temp, st.st_mode)
1084 return temp
1092 return temp
1085
1093
1086 class atomictempfile(posixfile):
1094 class atomictempfile(posixfile):
1087 """the file will only be copied when rename is called"""
1095 """the file will only be copied when rename is called"""
1088 def __init__(self, name, mode):
1096 def __init__(self, name, mode):
1089 self.__name = name
1097 self.__name = name
1090 self.temp = mktempcopy(name)
1098 self.temp = mktempcopy(name)
1091 posixfile.__init__(self, self.temp, mode)
1099 posixfile.__init__(self, self.temp, mode)
1092 def rename(self):
1100 def rename(self):
1093 if not self.closed:
1101 if not self.closed:
1094 posixfile.close(self)
1102 posixfile.close(self)
1095 rename(self.temp, localpath(self.__name))
1103 rename(self.temp, localpath(self.__name))
1096 def __del__(self):
1104 def __del__(self):
1097 if not self.closed:
1105 if not self.closed:
1098 try:
1106 try:
1099 os.unlink(self.temp)
1107 os.unlink(self.temp)
1100 except: pass
1108 except: pass
1101 posixfile.close(self)
1109 posixfile.close(self)
1102
1110
1103 class atomicfile(atomictempfile):
1111 class atomicfile(atomictempfile):
1104 """the file will only be copied on close"""
1112 """the file will only be copied on close"""
1105 def __init__(self, name, mode):
1113 def __init__(self, name, mode):
1106 atomictempfile.__init__(self, name, mode)
1114 atomictempfile.__init__(self, name, mode)
1107 def close(self):
1115 def close(self):
1108 self.rename()
1116 self.rename()
1109 def __del__(self):
1117 def __del__(self):
1110 self.rename()
1118 self.rename()
1111
1119
1112 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
1120 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
1113 if audit_p:
1121 if audit_p:
1114 audit_path(path)
1122 audit_path(path)
1115 f = os.path.join(p, path)
1123 f = os.path.join(p, path)
1116
1124
1117 if not text:
1125 if not text:
1118 mode += "b" # for that other OS
1126 mode += "b" # for that other OS
1119
1127
1120 if mode[0] != "r":
1128 if mode[0] != "r":
1121 try:
1129 try:
1122 nlink = nlinks(f)
1130 nlink = nlinks(f)
1123 except OSError:
1131 except OSError:
1124 d = os.path.dirname(f)
1132 d = os.path.dirname(f)
1125 if not os.path.isdir(d):
1133 if not os.path.isdir(d):
1126 os.makedirs(d)
1134 os.makedirs(d)
1127 else:
1135 else:
1128 if atomic:
1136 if atomic:
1129 return atomicfile(f, mode)
1137 return atomicfile(f, mode)
1130 elif atomictemp:
1138 elif atomictemp:
1131 return atomictempfile(f, mode)
1139 return atomictempfile(f, mode)
1132 if nlink > 1:
1140 if nlink > 1:
1133 rename(mktempcopy(f), f)
1141 rename(mktempcopy(f), f)
1134 return posixfile(f, mode)
1142 return posixfile(f, mode)
1135
1143
1136 return o
1144 return o
1137
1145
1138 class chunkbuffer(object):
1146 class chunkbuffer(object):
1139 """Allow arbitrary sized chunks of data to be efficiently read from an
1147 """Allow arbitrary sized chunks of data to be efficiently read from an
1140 iterator over chunks of arbitrary size."""
1148 iterator over chunks of arbitrary size."""
1141
1149
1142 def __init__(self, in_iter, targetsize = 2**16):
1150 def __init__(self, in_iter, targetsize = 2**16):
1143 """in_iter is the iterator that's iterating over the input chunks.
1151 """in_iter is the iterator that's iterating over the input chunks.
1144 targetsize is how big a buffer to try to maintain."""
1152 targetsize is how big a buffer to try to maintain."""
1145 self.in_iter = iter(in_iter)
1153 self.in_iter = iter(in_iter)
1146 self.buf = ''
1154 self.buf = ''
1147 self.targetsize = int(targetsize)
1155 self.targetsize = int(targetsize)
1148 if self.targetsize <= 0:
1156 if self.targetsize <= 0:
1149 raise ValueError(_("targetsize must be greater than 0, was %d") %
1157 raise ValueError(_("targetsize must be greater than 0, was %d") %
1150 targetsize)
1158 targetsize)
1151 self.iterempty = False
1159 self.iterempty = False
1152
1160
1153 def fillbuf(self):
1161 def fillbuf(self):
1154 """Ignore target size; read every chunk from iterator until empty."""
1162 """Ignore target size; read every chunk from iterator until empty."""
1155 if not self.iterempty:
1163 if not self.iterempty:
1156 collector = cStringIO.StringIO()
1164 collector = cStringIO.StringIO()
1157 collector.write(self.buf)
1165 collector.write(self.buf)
1158 for ch in self.in_iter:
1166 for ch in self.in_iter:
1159 collector.write(ch)
1167 collector.write(ch)
1160 self.buf = collector.getvalue()
1168 self.buf = collector.getvalue()
1161 self.iterempty = True
1169 self.iterempty = True
1162
1170
1163 def read(self, l):
1171 def read(self, l):
1164 """Read L bytes of data from the iterator of chunks of data.
1172 """Read L bytes of data from the iterator of chunks of data.
1165 Returns less than L bytes if the iterator runs dry."""
1173 Returns less than L bytes if the iterator runs dry."""
1166 if l > len(self.buf) and not self.iterempty:
1174 if l > len(self.buf) and not self.iterempty:
1167 # Clamp to a multiple of self.targetsize
1175 # Clamp to a multiple of self.targetsize
1168 targetsize = self.targetsize * ((l // self.targetsize) + 1)
1176 targetsize = self.targetsize * ((l // self.targetsize) + 1)
1169 collector = cStringIO.StringIO()
1177 collector = cStringIO.StringIO()
1170 collector.write(self.buf)
1178 collector.write(self.buf)
1171 collected = len(self.buf)
1179 collected = len(self.buf)
1172 for chunk in self.in_iter:
1180 for chunk in self.in_iter:
1173 collector.write(chunk)
1181 collector.write(chunk)
1174 collected += len(chunk)
1182 collected += len(chunk)
1175 if collected >= targetsize:
1183 if collected >= targetsize:
1176 break
1184 break
1177 if collected < targetsize:
1185 if collected < targetsize:
1178 self.iterempty = True
1186 self.iterempty = True
1179 self.buf = collector.getvalue()
1187 self.buf = collector.getvalue()
1180 s, self.buf = self.buf[:l], buffer(self.buf, l)
1188 s, self.buf = self.buf[:l], buffer(self.buf, l)
1181 return s
1189 return s
1182
1190
1183 def filechunkiter(f, size=65536, limit=None):
1191 def filechunkiter(f, size=65536, limit=None):
1184 """Create a generator that produces the data in the file size
1192 """Create a generator that produces the data in the file size
1185 (default 65536) bytes at a time, up to optional limit (default is
1193 (default 65536) bytes at a time, up to optional limit (default is
1186 to read all data). Chunks may be less than size bytes if the
1194 to read all data). Chunks may be less than size bytes if the
1187 chunk is the last chunk in the file, or the file is a socket or
1195 chunk is the last chunk in the file, or the file is a socket or
1188 some other type of file that sometimes reads less data than is
1196 some other type of file that sometimes reads less data than is
1189 requested."""
1197 requested."""
1190 assert size >= 0
1198 assert size >= 0
1191 assert limit is None or limit >= 0
1199 assert limit is None or limit >= 0
1192 while True:
1200 while True:
1193 if limit is None: nbytes = size
1201 if limit is None: nbytes = size
1194 else: nbytes = min(limit, size)
1202 else: nbytes = min(limit, size)
1195 s = nbytes and f.read(nbytes)
1203 s = nbytes and f.read(nbytes)
1196 if not s: break
1204 if not s: break
1197 if limit: limit -= len(s)
1205 if limit: limit -= len(s)
1198 yield s
1206 yield s
1199
1207
1200 def makedate():
1208 def makedate():
1201 lt = time.localtime()
1209 lt = time.localtime()
1202 if lt[8] == 1 and time.daylight:
1210 if lt[8] == 1 and time.daylight:
1203 tz = time.altzone
1211 tz = time.altzone
1204 else:
1212 else:
1205 tz = time.timezone
1213 tz = time.timezone
1206 return time.mktime(lt), tz
1214 return time.mktime(lt), tz
1207
1215
1208 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
1216 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
1209 """represent a (unixtime, offset) tuple as a localized time.
1217 """represent a (unixtime, offset) tuple as a localized time.
1210 unixtime is seconds since the epoch, and offset is the time zone's
1218 unixtime is seconds since the epoch, and offset is the time zone's
1211 number of seconds away from UTC. if timezone is false, do not
1219 number of seconds away from UTC. if timezone is false, do not
1212 append time zone to string."""
1220 append time zone to string."""
1213 t, tz = date or makedate()
1221 t, tz = date or makedate()
1214 s = time.strftime(format, time.gmtime(float(t) - tz))
1222 s = time.strftime(format, time.gmtime(float(t) - tz))
1215 if timezone:
1223 if timezone:
1216 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
1224 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
1217 return s
1225 return s
1218
1226
1219 def strdate(string, format, defaults):
1227 def strdate(string, format, defaults):
1220 """parse a localized time string and return a (unixtime, offset) tuple.
1228 """parse a localized time string and return a (unixtime, offset) tuple.
1221 if the string cannot be parsed, ValueError is raised."""
1229 if the string cannot be parsed, ValueError is raised."""
1222 def timezone(string):
1230 def timezone(string):
1223 tz = string.split()[-1]
1231 tz = string.split()[-1]
1224 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1232 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1225 tz = int(tz)
1233 tz = int(tz)
1226 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
1234 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
1227 return offset
1235 return offset
1228 if tz == "GMT" or tz == "UTC":
1236 if tz == "GMT" or tz == "UTC":
1229 return 0
1237 return 0
1230 return None
1238 return None
1231
1239
1232 # NOTE: unixtime = localunixtime + offset
1240 # NOTE: unixtime = localunixtime + offset
1233 offset, date = timezone(string), string
1241 offset, date = timezone(string), string
1234 if offset != None:
1242 if offset != None:
1235 date = " ".join(string.split()[:-1])
1243 date = " ".join(string.split()[:-1])
1236
1244
1237 # add missing elements from defaults
1245 # add missing elements from defaults
1238 for part in defaults:
1246 for part in defaults:
1239 found = [True for p in part if ("%"+p) in format]
1247 found = [True for p in part if ("%"+p) in format]
1240 if not found:
1248 if not found:
1241 date += "@" + defaults[part]
1249 date += "@" + defaults[part]
1242 format += "@%" + part[0]
1250 format += "@%" + part[0]
1243
1251
1244 timetuple = time.strptime(date, format)
1252 timetuple = time.strptime(date, format)
1245 localunixtime = int(calendar.timegm(timetuple))
1253 localunixtime = int(calendar.timegm(timetuple))
1246 if offset is None:
1254 if offset is None:
1247 # local timezone
1255 # local timezone
1248 unixtime = int(time.mktime(timetuple))
1256 unixtime = int(time.mktime(timetuple))
1249 offset = unixtime - localunixtime
1257 offset = unixtime - localunixtime
1250 else:
1258 else:
1251 unixtime = localunixtime + offset
1259 unixtime = localunixtime + offset
1252 return unixtime, offset
1260 return unixtime, offset
1253
1261
1254 def parsedate(string, formats=None, defaults=None):
1262 def parsedate(string, formats=None, defaults=None):
1255 """parse a localized time string and return a (unixtime, offset) tuple.
1263 """parse a localized time string and return a (unixtime, offset) tuple.
1256 The date may be a "unixtime offset" string or in one of the specified
1264 The date may be a "unixtime offset" string or in one of the specified
1257 formats."""
1265 formats."""
1258 if not string:
1266 if not string:
1259 return 0, 0
1267 return 0, 0
1260 if not formats:
1268 if not formats:
1261 formats = defaultdateformats
1269 formats = defaultdateformats
1262 string = string.strip()
1270 string = string.strip()
1263 try:
1271 try:
1264 when, offset = map(int, string.split(' '))
1272 when, offset = map(int, string.split(' '))
1265 except ValueError:
1273 except ValueError:
1266 # fill out defaults
1274 # fill out defaults
1267 if not defaults:
1275 if not defaults:
1268 defaults = {}
1276 defaults = {}
1269 now = makedate()
1277 now = makedate()
1270 for part in "d mb yY HI M S".split():
1278 for part in "d mb yY HI M S".split():
1271 if part not in defaults:
1279 if part not in defaults:
1272 if part[0] in "HMS":
1280 if part[0] in "HMS":
1273 defaults[part] = "00"
1281 defaults[part] = "00"
1274 elif part[0] in "dm":
1282 elif part[0] in "dm":
1275 defaults[part] = "1"
1283 defaults[part] = "1"
1276 else:
1284 else:
1277 defaults[part] = datestr(now, "%" + part[0], False)
1285 defaults[part] = datestr(now, "%" + part[0], False)
1278
1286
1279 for format in formats:
1287 for format in formats:
1280 try:
1288 try:
1281 when, offset = strdate(string, format, defaults)
1289 when, offset = strdate(string, format, defaults)
1282 except ValueError:
1290 except ValueError:
1283 pass
1291 pass
1284 else:
1292 else:
1285 break
1293 break
1286 else:
1294 else:
1287 raise Abort(_('invalid date: %r ') % string)
1295 raise Abort(_('invalid date: %r ') % string)
1288 # validate explicit (probably user-specified) date and
1296 # validate explicit (probably user-specified) date and
1289 # time zone offset. values must fit in signed 32 bits for
1297 # time zone offset. values must fit in signed 32 bits for
1290 # current 32-bit linux runtimes. timezones go from UTC-12
1298 # current 32-bit linux runtimes. timezones go from UTC-12
1291 # to UTC+14
1299 # to UTC+14
1292 if abs(when) > 0x7fffffff:
1300 if abs(when) > 0x7fffffff:
1293 raise Abort(_('date exceeds 32 bits: %d') % when)
1301 raise Abort(_('date exceeds 32 bits: %d') % when)
1294 if offset < -50400 or offset > 43200:
1302 if offset < -50400 or offset > 43200:
1295 raise Abort(_('impossible time zone offset: %d') % offset)
1303 raise Abort(_('impossible time zone offset: %d') % offset)
1296 return when, offset
1304 return when, offset
1297
1305
1298 def matchdate(date):
1306 def matchdate(date):
1299 """Return a function that matches a given date match specifier
1307 """Return a function that matches a given date match specifier
1300
1308
1301 Formats include:
1309 Formats include:
1302
1310
1303 '{date}' match a given date to the accuracy provided
1311 '{date}' match a given date to the accuracy provided
1304
1312
1305 '<{date}' on or before a given date
1313 '<{date}' on or before a given date
1306
1314
1307 '>{date}' on or after a given date
1315 '>{date}' on or after a given date
1308
1316
1309 """
1317 """
1310
1318
1311 def lower(date):
1319 def lower(date):
1312 return parsedate(date, extendeddateformats)[0]
1320 return parsedate(date, extendeddateformats)[0]
1313
1321
1314 def upper(date):
1322 def upper(date):
1315 d = dict(mb="12", HI="23", M="59", S="59")
1323 d = dict(mb="12", HI="23", M="59", S="59")
1316 for days in "31 30 29".split():
1324 for days in "31 30 29".split():
1317 try:
1325 try:
1318 d["d"] = days
1326 d["d"] = days
1319 return parsedate(date, extendeddateformats, d)[0]
1327 return parsedate(date, extendeddateformats, d)[0]
1320 except:
1328 except:
1321 pass
1329 pass
1322 d["d"] = "28"
1330 d["d"] = "28"
1323 return parsedate(date, extendeddateformats, d)[0]
1331 return parsedate(date, extendeddateformats, d)[0]
1324
1332
1325 if date[0] == "<":
1333 if date[0] == "<":
1326 when = upper(date[1:])
1334 when = upper(date[1:])
1327 return lambda x: x <= when
1335 return lambda x: x <= when
1328 elif date[0] == ">":
1336 elif date[0] == ">":
1329 when = lower(date[1:])
1337 when = lower(date[1:])
1330 return lambda x: x >= when
1338 return lambda x: x >= when
1331 elif date[0] == "-":
1339 elif date[0] == "-":
1332 try:
1340 try:
1333 days = int(date[1:])
1341 days = int(date[1:])
1334 except ValueError:
1342 except ValueError:
1335 raise Abort(_("invalid day spec: %s") % date[1:])
1343 raise Abort(_("invalid day spec: %s") % date[1:])
1336 when = makedate()[0] - days * 3600 * 24
1344 when = makedate()[0] - days * 3600 * 24
1337 return lambda x: x >= when
1345 return lambda x: x >= when
1338 elif " to " in date:
1346 elif " to " in date:
1339 a, b = date.split(" to ")
1347 a, b = date.split(" to ")
1340 start, stop = lower(a), upper(b)
1348 start, stop = lower(a), upper(b)
1341 return lambda x: x >= start and x <= stop
1349 return lambda x: x >= start and x <= stop
1342 else:
1350 else:
1343 start, stop = lower(date), upper(date)
1351 start, stop = lower(date), upper(date)
1344 return lambda x: x >= start and x <= stop
1352 return lambda x: x >= start and x <= stop
1345
1353
1346 def shortuser(user):
1354 def shortuser(user):
1347 """Return a short representation of a user name or email address."""
1355 """Return a short representation of a user name or email address."""
1348 f = user.find('@')
1356 f = user.find('@')
1349 if f >= 0:
1357 if f >= 0:
1350 user = user[:f]
1358 user = user[:f]
1351 f = user.find('<')
1359 f = user.find('<')
1352 if f >= 0:
1360 if f >= 0:
1353 user = user[f+1:]
1361 user = user[f+1:]
1354 f = user.find(' ')
1362 f = user.find(' ')
1355 if f >= 0:
1363 if f >= 0:
1356 user = user[:f]
1364 user = user[:f]
1357 f = user.find('.')
1365 f = user.find('.')
1358 if f >= 0:
1366 if f >= 0:
1359 user = user[:f]
1367 user = user[:f]
1360 return user
1368 return user
1361
1369
1362 def ellipsis(text, maxlength=400):
1370 def ellipsis(text, maxlength=400):
1363 """Trim string to at most maxlength (default: 400) characters."""
1371 """Trim string to at most maxlength (default: 400) characters."""
1364 if len(text) <= maxlength:
1372 if len(text) <= maxlength:
1365 return text
1373 return text
1366 else:
1374 else:
1367 return "%s..." % (text[:maxlength-3])
1375 return "%s..." % (text[:maxlength-3])
1368
1376
1369 def walkrepos(path):
1377 def walkrepos(path):
1370 '''yield every hg repository under path, recursively.'''
1378 '''yield every hg repository under path, recursively.'''
1371 def errhandler(err):
1379 def errhandler(err):
1372 if err.filename == path:
1380 if err.filename == path:
1373 raise err
1381 raise err
1374
1382
1375 for root, dirs, files in os.walk(path, onerror=errhandler):
1383 for root, dirs, files in os.walk(path, onerror=errhandler):
1376 for d in dirs:
1384 for d in dirs:
1377 if d == '.hg':
1385 if d == '.hg':
1378 yield root
1386 yield root
1379 dirs[:] = []
1387 dirs[:] = []
1380 break
1388 break
1381
1389
1382 _rcpath = None
1390 _rcpath = None
1383
1391
1384 def os_rcpath():
1392 def os_rcpath():
1385 '''return default os-specific hgrc search path'''
1393 '''return default os-specific hgrc search path'''
1386 path = system_rcpath()
1394 path = system_rcpath()
1387 path.extend(user_rcpath())
1395 path.extend(user_rcpath())
1388 path = [os.path.normpath(f) for f in path]
1396 path = [os.path.normpath(f) for f in path]
1389 return path
1397 return path
1390
1398
1391 def rcpath():
1399 def rcpath():
1392 '''return hgrc search path. if env var HGRCPATH is set, use it.
1400 '''return hgrc search path. if env var HGRCPATH is set, use it.
1393 for each item in path, if directory, use files ending in .rc,
1401 for each item in path, if directory, use files ending in .rc,
1394 else use item.
1402 else use item.
1395 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1403 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1396 if no HGRCPATH, use default os-specific path.'''
1404 if no HGRCPATH, use default os-specific path.'''
1397 global _rcpath
1405 global _rcpath
1398 if _rcpath is None:
1406 if _rcpath is None:
1399 if 'HGRCPATH' in os.environ:
1407 if 'HGRCPATH' in os.environ:
1400 _rcpath = []
1408 _rcpath = []
1401 for p in os.environ['HGRCPATH'].split(os.pathsep):
1409 for p in os.environ['HGRCPATH'].split(os.pathsep):
1402 if not p: continue
1410 if not p: continue
1403 if os.path.isdir(p):
1411 if os.path.isdir(p):
1404 for f in os.listdir(p):
1412 for f in os.listdir(p):
1405 if f.endswith('.rc'):
1413 if f.endswith('.rc'):
1406 _rcpath.append(os.path.join(p, f))
1414 _rcpath.append(os.path.join(p, f))
1407 else:
1415 else:
1408 _rcpath.append(p)
1416 _rcpath.append(p)
1409 else:
1417 else:
1410 _rcpath = os_rcpath()
1418 _rcpath = os_rcpath()
1411 return _rcpath
1419 return _rcpath
1412
1420
1413 def bytecount(nbytes):
1421 def bytecount(nbytes):
1414 '''return byte count formatted as readable string, with units'''
1422 '''return byte count formatted as readable string, with units'''
1415
1423
1416 units = (
1424 units = (
1417 (100, 1<<30, _('%.0f GB')),
1425 (100, 1<<30, _('%.0f GB')),
1418 (10, 1<<30, _('%.1f GB')),
1426 (10, 1<<30, _('%.1f GB')),
1419 (1, 1<<30, _('%.2f GB')),
1427 (1, 1<<30, _('%.2f GB')),
1420 (100, 1<<20, _('%.0f MB')),
1428 (100, 1<<20, _('%.0f MB')),
1421 (10, 1<<20, _('%.1f MB')),
1429 (10, 1<<20, _('%.1f MB')),
1422 (1, 1<<20, _('%.2f MB')),
1430 (1, 1<<20, _('%.2f MB')),
1423 (100, 1<<10, _('%.0f KB')),
1431 (100, 1<<10, _('%.0f KB')),
1424 (10, 1<<10, _('%.1f KB')),
1432 (10, 1<<10, _('%.1f KB')),
1425 (1, 1<<10, _('%.2f KB')),
1433 (1, 1<<10, _('%.2f KB')),
1426 (1, 1, _('%.0f bytes')),
1434 (1, 1, _('%.0f bytes')),
1427 )
1435 )
1428
1436
1429 for multiplier, divisor, format in units:
1437 for multiplier, divisor, format in units:
1430 if nbytes >= divisor * multiplier:
1438 if nbytes >= divisor * multiplier:
1431 return format % (nbytes / float(divisor))
1439 return format % (nbytes / float(divisor))
1432 return units[-1][2] % nbytes
1440 return units[-1][2] % nbytes
1433
1441
1434 def drop_scheme(scheme, path):
1442 def drop_scheme(scheme, path):
1435 sc = scheme + ':'
1443 sc = scheme + ':'
1436 if path.startswith(sc):
1444 if path.startswith(sc):
1437 path = path[len(sc):]
1445 path = path[len(sc):]
1438 if path.startswith('//'):
1446 if path.startswith('//'):
1439 path = path[2:]
1447 path = path[2:]
1440 return path
1448 return path
@@ -1,34 +1,36 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init test
3 hg init test
4 cd test
4 cd test
5 echo foo>foo
5 echo foo>foo
6 hg commit -A -d '0 0' -m 1
6 hg commit -A -d '0 0' -m 1
7 hg --config server.uncompressed=True serve -p 20059 -d --pid-file=../hg1.pid
7 hg --config server.uncompressed=True serve -p 20059 -d --pid-file=../hg1.pid
8 hg serve -p 20060 -d --pid-file=../hg2.pid
8 hg serve -p 20060 -d --pid-file=../hg2.pid
9 # Test server address cannot be reused
10 hg serve -p 20060 2>&1 | sed -e 's/abort: cannot start server:.*/abort: cannot start server:/'
9 cd ..
11 cd ..
10 cat hg1.pid hg2.pid >> $DAEMON_PIDS
12 cat hg1.pid hg2.pid >> $DAEMON_PIDS
11
13
12 echo % clone via stream
14 echo % clone via stream
13 http_proxy= hg clone --uncompressed http://localhost:20059/ copy 2>&1 | \
15 http_proxy= hg clone --uncompressed http://localhost:20059/ copy 2>&1 | \
14 sed -e 's/[0-9][0-9.]*/XXX/g' -e 's/[KM]\(B\/sec\)/X\1/'
16 sed -e 's/[0-9][0-9.]*/XXX/g' -e 's/[KM]\(B\/sec\)/X\1/'
15 hg verify -R copy
17 hg verify -R copy
16
18
17 echo % try to clone via stream, should use pull instead
19 echo % try to clone via stream, should use pull instead
18 http_proxy= hg clone --uncompressed http://localhost:20060/ copy2
20 http_proxy= hg clone --uncompressed http://localhost:20060/ copy2
19
21
20 echo % clone via pull
22 echo % clone via pull
21 http_proxy= hg clone http://localhost:20059/ copy-pull
23 http_proxy= hg clone http://localhost:20059/ copy-pull
22 hg verify -R copy-pull
24 hg verify -R copy-pull
23
25
24 cd test
26 cd test
25 echo bar > bar
27 echo bar > bar
26 hg commit -A -d '1 0' -m 2
28 hg commit -A -d '1 0' -m 2
27 cd ..
29 cd ..
28
30
29 echo % pull
31 echo % pull
30 cd copy-pull
32 cd copy-pull
31 echo '[hooks]' >> .hg/hgrc
33 echo '[hooks]' >> .hg/hgrc
32 echo 'changegroup = echo changegroup: u=$HG_URL' >> .hg/hgrc
34 echo 'changegroup = echo changegroup: u=$HG_URL' >> .hg/hgrc
33 hg pull
35 hg pull
34 cd ..
36 cd ..
@@ -1,40 +1,41 b''
1 adding foo
1 adding foo
2 abort: cannot start server:
2 % clone via stream
3 % clone via stream
3 streaming all changes
4 streaming all changes
4 XXX files to transfer, XXX bytes of data
5 XXX files to transfer, XXX bytes of data
5 transferred XXX bytes in XXX seconds (XXX XB/sec)
6 transferred XXX bytes in XXX seconds (XXX XB/sec)
6 XXX files updated, XXX files merged, XXX files removed, XXX files unresolved
7 XXX files updated, XXX files merged, XXX files removed, XXX files unresolved
7 checking changesets
8 checking changesets
8 checking manifests
9 checking manifests
9 crosschecking files in changesets and manifests
10 crosschecking files in changesets and manifests
10 checking files
11 checking files
11 1 files, 1 changesets, 1 total revisions
12 1 files, 1 changesets, 1 total revisions
12 % try to clone via stream, should use pull instead
13 % try to clone via stream, should use pull instead
13 requesting all changes
14 requesting all changes
14 adding changesets
15 adding changesets
15 adding manifests
16 adding manifests
16 adding file changes
17 adding file changes
17 added 1 changesets with 1 changes to 1 files
18 added 1 changesets with 1 changes to 1 files
18 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
19 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
19 % clone via pull
20 % clone via pull
20 requesting all changes
21 requesting all changes
21 adding changesets
22 adding changesets
22 adding manifests
23 adding manifests
23 adding file changes
24 adding file changes
24 added 1 changesets with 1 changes to 1 files
25 added 1 changesets with 1 changes to 1 files
25 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
26 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
26 checking changesets
27 checking changesets
27 checking manifests
28 checking manifests
28 crosschecking files in changesets and manifests
29 crosschecking files in changesets and manifests
29 checking files
30 checking files
30 1 files, 1 changesets, 1 total revisions
31 1 files, 1 changesets, 1 total revisions
31 adding bar
32 adding bar
32 % pull
33 % pull
33 changegroup: u=http://localhost:20059/
34 changegroup: u=http://localhost:20059/
34 pulling from http://localhost:20059/
35 pulling from http://localhost:20059/
35 searching for changes
36 searching for changes
36 adding changesets
37 adding changesets
37 adding manifests
38 adding manifests
38 adding file changes
39 adding file changes
39 added 1 changesets with 1 changes to 1 files
40 added 1 changesets with 1 changes to 1 files
40 (run 'hg update' to get a working copy)
41 (run 'hg update' to get a working copy)
@@ -1,117 +1,120 b''
1 #!/bin/sh
1 #!/bin/sh
2
2
3 echo "[extensions]" >> $HGRCPATH
3 echo "[extensions]" >> $HGRCPATH
4 echo "mq=" >> $HGRCPATH
4 echo "mq=" >> $HGRCPATH
5
5
6 hg init
6 hg init
7 hg qinit
7 hg qinit
8
8
9 echo x > x
9 echo x > x
10 hg ci -Ama
10 hg ci -Ama
11
11
12 hg qnew a.patch
12 hg qnew a.patch
13 echo a > a
13 echo a > a
14 hg add a
14 hg add a
15 hg qrefresh
15 hg qrefresh
16
16
17 hg qnew b.patch
17 hg qnew b.patch
18 echo b > b
18 echo b > b
19 hg add b
19 hg add b
20 hg qrefresh
20 hg qrefresh
21
21
22 hg qnew c.patch
22 hg qnew c.patch
23 echo c > c
23 echo c > c
24 hg add c
24 hg add c
25 hg qrefresh
25 hg qrefresh
26
26
27 hg qpop -a
27 hg qpop -a
28
28
29 echo % should fail
29 echo % should fail
30 hg qguard does-not-exist.patch +bleh
31
32 echo % should fail
30 hg qguard +fail
33 hg qguard +fail
31
34
32 hg qpush
35 hg qpush
33 echo % should guard a.patch
36 echo % should guard a.patch
34 hg qguard +a
37 hg qguard +a
35 echo % should print +a
38 echo % should print +a
36 hg qguard
39 hg qguard
37 hg qpop
40 hg qpop
38
41
39 hg qguard a.patch
42 hg qguard a.patch
40 echo % should push b.patch
43 echo % should push b.patch
41 hg qpush
44 hg qpush
42
45
43 hg qpop
46 hg qpop
44 hg qselect a
47 hg qselect a
45 echo % should push a.patch
48 echo % should push a.patch
46 hg qpush
49 hg qpush
47
50
48 hg qguard c.patch -a
51 hg qguard c.patch -a
49 echo % should print -a
52 echo % should print -a
50 hg qguard c.patch
53 hg qguard c.patch
51
54
52 echo % should skip c.patch
55 echo % should skip c.patch
53 hg qpush -a
56 hg qpush -a
54
57
55 hg qguard -n c.patch
58 hg qguard -n c.patch
56 echo % should push c.patch
59 echo % should push c.patch
57 hg qpush -a
60 hg qpush -a
58
61
59 hg qpop -a
62 hg qpop -a
60 hg qselect -n
63 hg qselect -n
61 echo % should push all
64 echo % should push all
62 hg qpush -a
65 hg qpush -a
63
66
64 hg qpop -a
67 hg qpop -a
65 hg qguard a.patch +1
68 hg qguard a.patch +1
66 hg qguard b.patch +2
69 hg qguard b.patch +2
67 hg qselect 1
70 hg qselect 1
68 echo % should push a.patch, not b.patch
71 echo % should push a.patch, not b.patch
69 hg qpush
72 hg qpush
70 hg qpush
73 hg qpush
71 hg qpop -a
74 hg qpop -a
72
75
73 hg qselect 2
76 hg qselect 2
74 echo % should push b.patch
77 echo % should push b.patch
75 hg qpush
78 hg qpush
76 hg qpop -a
79 hg qpop -a
77
80
78 hg qselect 1 2
81 hg qselect 1 2
79 echo % should push a.patch, b.patch
82 echo % should push a.patch, b.patch
80 hg qpush
83 hg qpush
81 hg qpush
84 hg qpush
82 hg qpop -a
85 hg qpop -a
83
86
84 hg qguard a.patch +1 +2 -3
87 hg qguard a.patch +1 +2 -3
85 hg qselect 1 2 3
88 hg qselect 1 2 3
86 echo % list patches and guards
89 echo % list patches and guards
87 hg qguard -l
90 hg qguard -l
88 echo % list series
91 echo % list series
89 hg qseries -v
92 hg qseries -v
90 echo % list guards
93 echo % list guards
91 hg qselect
94 hg qselect
92 echo % should push b.patch
95 echo % should push b.patch
93 hg qpush
96 hg qpush
94
97
95 hg qpush -a
98 hg qpush -a
96 hg qselect -n --reapply
99 hg qselect -n --reapply
97 echo % guards in series file: +1 +2 -3
100 echo % guards in series file: +1 +2 -3
98 hg qselect -s
101 hg qselect -s
99 echo % should show c.patch
102 echo % should show c.patch
100 hg qapplied
103 hg qapplied
101
104
102 hg qrename a.patch new.patch
105 hg qrename a.patch new.patch
103 echo % should show :
106 echo % should show :
104 echo % new.patch: +1 +2 -3
107 echo % new.patch: +1 +2 -3
105 echo % b.patch: +2
108 echo % b.patch: +2
106 echo % c.patch: unguarded
109 echo % c.patch: unguarded
107 hg qguard -l
110 hg qguard -l
108
111
109 hg qnew d.patch
112 hg qnew d.patch
110 hg qpop
113 hg qpop
111 echo % should show new.patch and b.patch as Guarded, c.patch as Applied
114 echo % should show new.patch and b.patch as Guarded, c.patch as Applied
112 echo % and d.patch as Unapplied
115 echo % and d.patch as Unapplied
113 hg qseries -v
116 hg qseries -v
114
117
115 hg qguard d.patch +2
118 hg qguard d.patch +2
116 echo % new.patch, b.patch: Guarded. c.patch: Applied. d.patch: Guarded.
119 echo % new.patch, b.patch: Guarded. c.patch: Applied. d.patch: Guarded.
117 hg qseries -v
120 hg qseries -v
@@ -1,103 +1,105 b''
1 adding x
1 adding x
2 Patch queue now empty
2 Patch queue now empty
3 % should fail
3 % should fail
4 abort: no patch named does-not-exist.patch
5 % should fail
4 abort: no patches applied
6 abort: no patches applied
5 applying a.patch
7 applying a.patch
6 Now at: a.patch
8 Now at: a.patch
7 % should guard a.patch
9 % should guard a.patch
8 % should print +a
10 % should print +a
9 a.patch: +a
11 a.patch: +a
10 Patch queue now empty
12 Patch queue now empty
11 a.patch: +a
13 a.patch: +a
12 % should push b.patch
14 % should push b.patch
13 applying b.patch
15 applying b.patch
14 Now at: b.patch
16 Now at: b.patch
15 Patch queue now empty
17 Patch queue now empty
16 number of unguarded, unapplied patches has changed from 2 to 3
18 number of unguarded, unapplied patches has changed from 2 to 3
17 % should push a.patch
19 % should push a.patch
18 applying a.patch
20 applying a.patch
19 Now at: a.patch
21 Now at: a.patch
20 % should print -a
22 % should print -a
21 c.patch: -a
23 c.patch: -a
22 % should skip c.patch
24 % should skip c.patch
23 applying b.patch
25 applying b.patch
24 skipping c.patch - guarded by '-a'
26 skipping c.patch - guarded by '-a'
25 Now at: b.patch
27 Now at: b.patch
26 % should push c.patch
28 % should push c.patch
27 applying c.patch
29 applying c.patch
28 Now at: c.patch
30 Now at: c.patch
29 Patch queue now empty
31 Patch queue now empty
30 guards deactivated
32 guards deactivated
31 number of unguarded, unapplied patches has changed from 3 to 2
33 number of unguarded, unapplied patches has changed from 3 to 2
32 % should push all
34 % should push all
33 applying b.patch
35 applying b.patch
34 applying c.patch
36 applying c.patch
35 Now at: c.patch
37 Now at: c.patch
36 Patch queue now empty
38 Patch queue now empty
37 number of unguarded, unapplied patches has changed from 1 to 2
39 number of unguarded, unapplied patches has changed from 1 to 2
38 % should push a.patch, not b.patch
40 % should push a.patch, not b.patch
39 applying a.patch
41 applying a.patch
40 Now at: a.patch
42 Now at: a.patch
41 applying c.patch
43 applying c.patch
42 Now at: c.patch
44 Now at: c.patch
43 Patch queue now empty
45 Patch queue now empty
44 % should push b.patch
46 % should push b.patch
45 applying b.patch
47 applying b.patch
46 Now at: b.patch
48 Now at: b.patch
47 Patch queue now empty
49 Patch queue now empty
48 number of unguarded, unapplied patches has changed from 2 to 3
50 number of unguarded, unapplied patches has changed from 2 to 3
49 % should push a.patch, b.patch
51 % should push a.patch, b.patch
50 applying a.patch
52 applying a.patch
51 Now at: a.patch
53 Now at: a.patch
52 applying b.patch
54 applying b.patch
53 Now at: b.patch
55 Now at: b.patch
54 Patch queue now empty
56 Patch queue now empty
55 number of unguarded, unapplied patches has changed from 3 to 2
57 number of unguarded, unapplied patches has changed from 3 to 2
56 % list patches and guards
58 % list patches and guards
57 a.patch: +1 +2 -3
59 a.patch: +1 +2 -3
58 b.patch: +2
60 b.patch: +2
59 c.patch: unguarded
61 c.patch: unguarded
60 % list series
62 % list series
61 0 G a.patch
63 0 G a.patch
62 1 U b.patch
64 1 U b.patch
63 2 U c.patch
65 2 U c.patch
64 % list guards
66 % list guards
65 1
67 1
66 2
68 2
67 3
69 3
68 % should push b.patch
70 % should push b.patch
69 applying b.patch
71 applying b.patch
70 Now at: b.patch
72 Now at: b.patch
71 applying c.patch
73 applying c.patch
72 Now at: c.patch
74 Now at: c.patch
73 guards deactivated
75 guards deactivated
74 popping guarded patches
76 popping guarded patches
75 Patch queue now empty
77 Patch queue now empty
76 reapplying unguarded patches
78 reapplying unguarded patches
77 applying c.patch
79 applying c.patch
78 Now at: c.patch
80 Now at: c.patch
79 % guards in series file: +1 +2 -3
81 % guards in series file: +1 +2 -3
80 +1
82 +1
81 +2
83 +2
82 -3
84 -3
83 % should show c.patch
85 % should show c.patch
84 c.patch
86 c.patch
85 % should show :
87 % should show :
86 % new.patch: +1 +2 -3
88 % new.patch: +1 +2 -3
87 % b.patch: +2
89 % b.patch: +2
88 % c.patch: unguarded
90 % c.patch: unguarded
89 new.patch: +1 +2 -3
91 new.patch: +1 +2 -3
90 b.patch: +2
92 b.patch: +2
91 c.patch: unguarded
93 c.patch: unguarded
92 Now at: c.patch
94 Now at: c.patch
93 % should show new.patch and b.patch as Guarded, c.patch as Applied
95 % should show new.patch and b.patch as Guarded, c.patch as Applied
94 % and d.patch as Unapplied
96 % and d.patch as Unapplied
95 0 G new.patch
97 0 G new.patch
96 1 G b.patch
98 1 G b.patch
97 2 A c.patch
99 2 A c.patch
98 3 U d.patch
100 3 U d.patch
99 % new.patch, b.patch: Guarded. c.patch: Applied. d.patch: Guarded.
101 % new.patch, b.patch: Guarded. c.patch: Applied. d.patch: Guarded.
100 0 G new.patch
102 0 G new.patch
101 1 G b.patch
103 1 G b.patch
102 2 A c.patch
104 2 A c.patch
103 3 G d.patch
105 3 G d.patch
General Comments 0
You need to be logged in to leave comments. Login now