cleanup: drop variables for unused return values...
Peter Arrenbrecht - r7874:d812029c default
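The cleanup is mechanical and appears once in each file below: a call made purely for its side effect no longer binds its result to a throwaway name, and an unused exception binding is dropped. A minimal runnable sketch of the pattern (illustrative only, not part of the changeset):

    import io

    fp = io.StringIO("first\nsecond\n")

    # Before: l = fp.readline()  -- 'l' was never read again.
    fp.readline()  # after: call for the side effect; the stream still advances

    try:
        int("not a number")
    except ValueError:  # before (Python 2): "except ValueError, e:" with 'e' unused
        pass

    print(fp.readline())  # prints "second"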
@@ -1,353 +1,353 @@ hgext/convert/cvs.py
1 1 # CVS conversion code inspired by hg-cvs-import and git-cvsimport
2 2
3 3 import os, locale, re, socket, errno
4 4 from cStringIO import StringIO
5 5 from mercurial import util
6 6 from mercurial.i18n import _
7 7
8 8 from common import NoRepo, commit, converter_source, checktool
9 9 import cvsps
10 10
11 11 class convert_cvs(converter_source):
12 12 def __init__(self, ui, path, rev=None):
13 13 super(convert_cvs, self).__init__(ui, path, rev=rev)
14 14
15 15 cvs = os.path.join(path, "CVS")
16 16 if not os.path.exists(cvs):
17 17 raise NoRepo("%s does not look like a CVS checkout" % path)
18 18
19 19 checktool('cvs')
20 20 self.cmd = ui.config('convert', 'cvsps', 'builtin')
21 21 cvspsexe = self.cmd.split(None, 1)[0]
22 22 self.builtin = cvspsexe == 'builtin'
23 23
24 24 if not self.builtin:
25 25 checktool(cvspsexe)
26 26
27 27 self.changeset = {}
28 28 self.files = {}
29 29 self.tags = {}
30 30 self.lastbranch = {}
31 31 self.parent = {}
32 32 self.socket = None
33 33 self.cvsroot = file(os.path.join(cvs, "Root")).read()[:-1]
34 34 self.cvsrepo = file(os.path.join(cvs, "Repository")).read()[:-1]
35 35 self.encoding = locale.getpreferredencoding()
36 36
37 37 self._parse(ui)
38 38 self._connect()
39 39
40 40 def _parse(self, ui):
41 41 if self.changeset:
42 42 return
43 43
44 44 maxrev = 0
45 45 cmd = self.cmd
46 46 if self.rev:
47 47 # TODO: handle tags
48 48 try:
49 49 # patchset number?
50 50 maxrev = int(self.rev)
51 51 except ValueError:
52 52 try:
53 53 # date
54 54 util.parsedate(self.rev, ['%Y/%m/%d %H:%M:%S'])
55 55 cmd = '%s -d "1970/01/01 00:00:01" -d "%s"' % (cmd, self.rev)
56 56 except util.Abort:
57 57 raise util.Abort(_('revision %s is not a patchset number or date') % self.rev)
58 58
59 59 d = os.getcwd()
60 60 try:
61 61 os.chdir(self.path)
62 62 id = None
63 63 state = 0
64 64 filerevids = {}
65 65
66 66 if self.builtin:
67 67 # builtin cvsps code
68 68 ui.status(_('using builtin cvsps\n'))
69 69
70 70 db = cvsps.createlog(ui, cache='update')
71 71 db = cvsps.createchangeset(ui, db,
72 72 fuzz=int(ui.config('convert', 'cvsps.fuzz', 60)),
73 73 mergeto=ui.config('convert', 'cvsps.mergeto', None),
74 74 mergefrom=ui.config('convert', 'cvsps.mergefrom', None))
75 75
76 76 for cs in db:
77 77 if maxrev and cs.id>maxrev:
78 78 break
79 79 id = str(cs.id)
80 80 cs.author = self.recode(cs.author)
81 81 self.lastbranch[cs.branch] = id
82 82 cs.comment = self.recode(cs.comment)
83 83 date = util.datestr(cs.date)
84 84 self.tags.update(dict.fromkeys(cs.tags, id))
85 85
86 86 files = {}
87 87 for f in cs.entries:
88 88 files[f.file] = "%s%s" % ('.'.join([str(x) for x in f.revision]),
89 89 ['', '(DEAD)'][f.dead])
90 90
91 91 # add current commit to set
92 92 c = commit(author=cs.author, date=date,
93 93 parents=[str(p.id) for p in cs.parents],
94 94 desc=cs.comment, branch=cs.branch or '')
95 95 self.changeset[id] = c
96 96 self.files[id] = files
97 97 else:
98 98 # external cvsps
99 99 for l in util.popen(cmd):
100 100 if state == 0: # header
101 101 if l.startswith("PatchSet"):
102 102 id = l[9:-2]
103 103 if maxrev and int(id) > maxrev:
104 104 # ignore everything
105 105 state = 3
106 106 elif l.startswith("Date:"):
107 107 date = util.parsedate(l[6:-1], ["%Y/%m/%d %H:%M:%S"])
108 108 date = util.datestr(date)
109 109 elif l.startswith("Branch:"):
110 110 branch = l[8:-1]
111 111 self.parent[id] = self.lastbranch.get(branch, 'bad')
112 112 self.lastbranch[branch] = id
113 113 elif l.startswith("Ancestor branch:"):
114 114 ancestor = l[17:-1]
115 115 # figure out the parent later
116 116 self.parent[id] = self.lastbranch[ancestor]
117 117 elif l.startswith("Author:"):
118 118 author = self.recode(l[8:-1])
119 119 elif l.startswith("Tag:") or l.startswith("Tags:"):
120 120 t = l[l.index(':')+1:]
121 121 t = [ut.strip() for ut in t.split(',')]
122 122 if (len(t) > 1) or (t[0] and (t[0] != "(none)")):
123 123 self.tags.update(dict.fromkeys(t, id))
124 124 elif l.startswith("Log:"):
125 125 # switch to gathering log
126 126 state = 1
127 127 log = ""
128 128 elif state == 1: # log
129 129 if l == "Members: \n":
130 130 # switch to gathering members
131 131 files = {}
132 132 oldrevs = []
133 133 log = self.recode(log[:-1])
134 134 state = 2
135 135 else:
136 136 # gather log
137 137 log += l
138 138 elif state == 2: # members
139 139 if l == "\n": # start of next entry
140 140 state = 0
141 141 p = [self.parent[id]]
142 142 if id == "1":
143 143 p = []
144 144 if branch == "HEAD":
145 145 branch = ""
146 146 if branch:
147 147 latest = 0
148 148 # the last changeset that contains a base
149 149 # file is our parent
150 150 for r in oldrevs:
151 151 latest = max(filerevids.get(r, 0), latest)
152 152 if latest:
153 153 p = [latest]
154 154
155 155 # add current commit to set
156 156 c = commit(author=author, date=date, parents=p,
157 157 desc=log, branch=branch)
158 158 self.changeset[id] = c
159 159 self.files[id] = files
160 160 else:
161 161 colon = l.rfind(':')
162 162 file = l[1:colon]
163 163 rev = l[colon+1:-2]
164 164 oldrev, rev = rev.split("->")
165 165 files[file] = rev
166 166
167 167 # save some information for identifying branch points
168 168 oldrevs.append("%s:%s" % (oldrev, file))
169 169 filerevids["%s:%s" % (rev, file)] = id
170 170 elif state == 3:
171 171 # swallow all input
172 172 continue
173 173
174 174 self.heads = self.lastbranch.values()
175 175 finally:
176 176 os.chdir(d)
177 177
178 178 def _connect(self):
179 179 root = self.cvsroot
180 180 conntype = None
181 181 user, host = None, None
182 182 cmd = ['cvs', 'server']
183 183
184 184 self.ui.status(_("connecting to %s\n") % root)
185 185
186 186 if root.startswith(":pserver:"):
187 187 root = root[9:]
188 188 m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
189 189 root)
190 190 if m:
191 191 conntype = "pserver"
192 192 user, passw, serv, port, root = m.groups()
193 193 if not user:
194 194 user = "anonymous"
195 195 if not port:
196 196 port = 2401
197 197 else:
198 198 port = int(port)
199 199 format0 = ":pserver:%s@%s:%s" % (user, serv, root)
200 200 format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
201 201
202 202 if not passw:
203 203 passw = "A"
204 204 cvspass = os.path.expanduser("~/.cvspass")
205 205 try:
206 206 pf = open(cvspass)
207 207 for line in pf.read().splitlines():
208 208 part1, part2 = line.split(' ', 1)
209 209 if part1 == '/1':
210 210 # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
211 211 part1, part2 = part2.split(' ', 1)
212 212 format = format1
213 213 else:
214 214 # :pserver:user@example.com:/cvsroot/foo Ah<Z
215 215 format = format0
216 216 if part1 == format:
217 217 passw = part2
218 218 break
219 219 pf.close()
220 220 except IOError, inst:
221 221 if inst.errno != errno.ENOENT:
222 222 if not getattr(inst, 'filename', None):
223 223 inst.filename = cvspass
224 224 raise
225 225
226 226 sck = socket.socket()
227 227 sck.connect((serv, port))
228 228 sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
229 229 "END AUTH REQUEST", ""]))
230 230 if sck.recv(128) != "I LOVE YOU\n":
231 231 raise util.Abort(_("CVS pserver authentication failed"))
232 232
233 233 self.writep = self.readp = sck.makefile('r+')
234 234
235 235 if not conntype and root.startswith(":local:"):
236 236 conntype = "local"
237 237 root = root[7:]
238 238
239 239 if not conntype:
240 240 # :ext:user@host/home/user/path/to/cvsroot
241 241 if root.startswith(":ext:"):
242 242 root = root[5:]
243 243 m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
244 244 # Do not take Windows path "c:\foo\bar" for a connection string
245 245 if os.path.isdir(root) or not m:
246 246 conntype = "local"
247 247 else:
248 248 conntype = "rsh"
249 249 user, host, root = m.group(1), m.group(2), m.group(3)
250 250
251 251 if conntype != "pserver":
252 252 if conntype == "rsh":
253 253 rsh = os.environ.get("CVS_RSH") or "ssh"
254 254 if user:
255 255 cmd = [rsh, '-l', user, host] + cmd
256 256 else:
257 257 cmd = [rsh, host] + cmd
258 258
259 259 # popen2 does not support argument lists under Windows
260 260 cmd = [util.shellquote(arg) for arg in cmd]
261 261 cmd = util.quotecommand(' '.join(cmd))
262 262 self.writep, self.readp = util.popen2(cmd, 'b')
263 263
264 264 self.realroot = root
265 265
266 266 self.writep.write("Root %s\n" % root)
267 267 self.writep.write("Valid-responses ok error Valid-requests Mode"
268 268 " M Mbinary E Checked-in Created Updated"
269 269 " Merged Removed\n")
270 270 self.writep.write("valid-requests\n")
271 271 self.writep.flush()
272 272 r = self.readp.readline()
273 273 if not r.startswith("Valid-requests"):
274 274 raise util.Abort(_("server sucks"))
275 275 if "UseUnchanged" in r:
276 276 self.writep.write("UseUnchanged\n")
277 277 self.writep.flush()
278 278 r = self.readp.readline()
279 279
280 280 def getheads(self):
281 281 return self.heads
282 282
283 283 def _getfile(self, name, rev):
284 284
285 285 def chunkedread(fp, count):
286 286 # file objects returned by socket.makefile() do not handle
287 287 # large read() requests very well.
288 288 chunksize = 65536
289 289 output = StringIO()
290 290 while count > 0:
291 291 data = fp.read(min(count, chunksize))
292 292 if not data:
293 293 raise util.Abort(_("%d bytes missing from remote file") % count)
294 294 count -= len(data)
295 295 output.write(data)
296 296 return output.getvalue()
297 297
298 298 if rev.endswith("(DEAD)"):
299 299 raise IOError
300 300
301 301 args = ("-N -P -kk -r %s --" % rev).split()
302 302 args.append(self.cvsrepo + '/' + name)
303 303 for x in args:
304 304 self.writep.write("Argument %s\n" % x)
305 305 self.writep.write("Directory .\n%s\nco\n" % self.realroot)
306 306 self.writep.flush()
307 307
308 308 data = ""
309 309 while 1:
310 310 line = self.readp.readline()
311 311 if line.startswith("Created ") or line.startswith("Updated "):
312 312 self.readp.readline() # path
313 313 self.readp.readline() # entries
314 314 mode = self.readp.readline()[:-1]
315 315 count = int(self.readp.readline()[:-1])
316 316 data = chunkedread(self.readp, count)
317 317 elif line.startswith(" "):
318 318 data += line[1:]
319 319 elif line.startswith("M "):
320 320 pass
321 321 elif line.startswith("Mbinary "):
322 322 count = int(self.readp.readline()[:-1])
323 323 data = chunkedread(self.readp, count)
324 324 else:
325 325 if line == "ok\n":
326 326 return (data, "x" in mode and "x" or "")
327 327 elif line.startswith("E "):
328 328 self.ui.warn(_("cvs server: %s\n") % line[2:])
329 329 elif line.startswith("Remove"):
330 l = self.readp.readline()
330 self.readp.readline()
331 331 else:
332 332 raise util.Abort(_("unknown CVS response: %s") % line)
333 333
334 334 def getfile(self, file, rev):
335 335 data, mode = self._getfile(file, rev)
336 336 self.modecache[(file, rev)] = mode
337 337 return data
338 338
339 339 def getmode(self, file, rev):
340 340 return self.modecache[(file, rev)]
341 341
342 342 def getchanges(self, rev):
343 343 self.modecache = {}
344 344 return util.sort(self.files[rev].items()), {}
345 345
346 346 def getcommit(self, rev):
347 347 return self.changeset[rev]
348 348
349 349 def gettags(self):
350 350 return self.tags
351 351
352 352 def getchangedfiles(self, rev, i):
353 353 return util.sort(self.files[rev].keys())
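The chunkedread helper in _getfile above reads large payloads in bounded slices because file objects returned by socket.makefile() handle large read() requests poorly. A self-contained sketch of the same pattern, with io.BytesIO standing in for the socket-backed file:

    import io

    def chunkedread(fp, count, chunksize=65536):
        # Read exactly 'count' bytes in bounded chunks rather than one
        # huge read(); fail loudly if the stream ends early.
        output = io.BytesIO()
        while count > 0:
            data = fp.read(min(count, chunksize))
            if not data:
                raise IOError("%d bytes missing from remote file" % count)
            count -= len(data)
            output.write(data)
        return output.getvalue()

    payload = b"x" * 200000
    assert chunkedread(io.BytesIO(payload), len(payload)) == payload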
@@ -1,335 +1,335 @@ hgext/convert/hg.py
1 1 # hg backend for convert extension
2 2
3 3 # Notes for hg->hg conversion:
4 4 #
5 5 # * Old versions of Mercurial didn't trim the whitespace from the ends
6 6 # of commit messages, but new versions do. Changesets created by
7 7 # those older versions, then converted, may thus have different
8 8 # hashes for changesets that are otherwise identical.
9 9 #
10 10 # * By default, the source revision is stored in the converted
11 11 # revision. This will cause the converted revision to have a
12 12 # different identity than the source. To avoid this, use the
13 13 # following option: "--config convert.hg.saverev=false"
14 14
15 15
16 16 import os, time
17 17 from mercurial.i18n import _
18 18 from mercurial.node import bin, hex, nullid
19 19 from mercurial import hg, util, context, error
20 20
21 21 from common import NoRepo, commit, converter_source, converter_sink
22 22
23 23 class mercurial_sink(converter_sink):
24 24 def __init__(self, ui, path):
25 25 converter_sink.__init__(self, ui, path)
26 26 self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
27 27 self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
28 28 self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
29 29 self.lastbranch = None
30 30 if os.path.isdir(path) and len(os.listdir(path)) > 0:
31 31 try:
32 32 self.repo = hg.repository(self.ui, path)
33 33 if not self.repo.local():
34 34 raise NoRepo(_('%s is not a local Mercurial repo') % path)
35 35 except error.RepoError, err:
36 36 ui.print_exc()
37 37 raise NoRepo(err.args[0])
38 38 else:
39 39 try:
40 40 ui.status(_('initializing destination %s repository\n') % path)
41 41 self.repo = hg.repository(self.ui, path, create=True)
42 42 if not self.repo.local():
43 43 raise NoRepo(_('%s is not a local Mercurial repo') % path)
44 44 self.created.append(path)
45 45 except error.RepoError, err:
46 46 ui.print_exc()
47 47 raise NoRepo("could not create hg repo %s as sink" % path)
48 48 self.lock = None
49 49 self.wlock = None
50 50 self.filemapmode = False
51 51
52 52 def before(self):
53 53 self.ui.debug(_('run hg sink pre-conversion action\n'))
54 54 self.wlock = self.repo.wlock()
55 55 self.lock = self.repo.lock()
56 56
57 57 def after(self):
58 58 self.ui.debug(_('run hg sink post-conversion action\n'))
59 59 self.lock = None
60 60 self.wlock = None
61 61
62 62 def revmapfile(self):
63 63 return os.path.join(self.path, ".hg", "shamap")
64 64
65 65 def authorfile(self):
66 66 return os.path.join(self.path, ".hg", "authormap")
67 67
68 68 def getheads(self):
69 69 h = self.repo.changelog.heads()
70 70 return [ hex(x) for x in h ]
71 71
72 72 def setbranch(self, branch, pbranches):
73 73 if not self.clonebranches:
74 74 return
75 75
76 76 setbranch = (branch != self.lastbranch)
77 77 self.lastbranch = branch
78 78 if not branch:
79 79 branch = 'default'
80 80 pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
81 81 pbranch = pbranches and pbranches[0][1] or 'default'
82 82
83 83 branchpath = os.path.join(self.path, branch)
84 84 if setbranch:
85 85 self.after()
86 86 try:
87 87 self.repo = hg.repository(self.ui, branchpath)
88 88 except:
89 89 self.repo = hg.repository(self.ui, branchpath, create=True)
90 90 self.before()
91 91
92 92 # pbranches may bring revisions from other branches (merge parents)
93 93 # Make sure we have them, or pull them.
94 94 missings = {}
95 95 for b in pbranches:
96 96 try:
97 97 self.repo.lookup(b[0])
98 98 except:
99 99 missings.setdefault(b[1], []).append(b[0])
100 100
101 101 if missings:
102 102 self.after()
103 103 for pbranch, heads in missings.iteritems():
104 104 pbranchpath = os.path.join(self.path, pbranch)
105 105 prepo = hg.repository(self.ui, pbranchpath)
106 106 self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
107 107 self.repo.pull(prepo, [prepo.lookup(h) for h in heads])
108 108 self.before()
109 109
110 110 def putcommit(self, files, copies, parents, commit, source):
111 111
112 112 files = dict(files)
113 113 def getfilectx(repo, memctx, f):
114 114 v = files[f]
115 115 data = source.getfile(f, v)
116 116 e = source.getmode(f, v)
117 117 return context.memfilectx(f, data, 'l' in e, 'x' in e, copies.get(f))
118 118
119 119 pl = []
120 120 for p in parents:
121 121 if p not in pl:
122 122 pl.append(p)
123 123 parents = pl
124 124 nparents = len(parents)
125 125 if self.filemapmode and nparents == 1:
126 126 m1node = self.repo.changelog.read(bin(parents[0]))[0]
127 127 parent = parents[0]
128 128
129 129 if len(parents) < 2: parents.append("0" * 40)
130 130 if len(parents) < 2: parents.append("0" * 40)
131 131 p2 = parents.pop(0)
132 132
133 133 text = commit.desc
134 134 extra = commit.extra.copy()
135 135 if self.branchnames and commit.branch:
136 136 extra['branch'] = commit.branch
137 137 if commit.rev:
138 138 extra['convert_revision'] = commit.rev
139 139
140 140 while parents:
141 141 p1 = p2
142 142 p2 = parents.pop(0)
143 143 ctx = context.memctx(self.repo, (p1, p2), text, files.keys(), getfilectx,
144 144 commit.author, commit.date, extra)
145 a = self.repo.commitctx(ctx)
145 self.repo.commitctx(ctx)
146 146 text = "(octopus merge fixup)\n"
147 147 p2 = hex(self.repo.changelog.tip())
148 148
149 149 if self.filemapmode and nparents == 1:
150 150 man = self.repo.manifest
151 151 mnode = self.repo.changelog.read(bin(p2))[0]
152 152 if not man.cmp(m1node, man.revision(mnode)):
153 153 self.repo.rollback()
154 154 return parent
155 155 return p2
156 156
157 157 def puttags(self, tags):
158 158 try:
159 159 parentctx = self.repo[self.tagsbranch]
160 160 tagparent = parentctx.node()
161 161 except error.RepoError, inst:
162 162 parentctx = None
163 163 tagparent = nullid
164 164
165 165 try:
166 166 oldlines = util.sort(parentctx['.hgtags'].data().splitlines(1))
167 167 except:
168 168 oldlines = []
169 169
170 170 newlines = util.sort([("%s %s\n" % (tags[tag], tag)) for tag in tags])
171 171
172 172 if newlines == oldlines:
173 173 return None
174 174 data = "".join(newlines)
175 175
176 176 def getfilectx(repo, memctx, f):
177 177 return context.memfilectx(f, data, False, False, None)
178 178
179 179 self.ui.status(_("updating tags\n"))
180 180 date = "%s 0" % int(time.mktime(time.gmtime()))
181 181 extra = {'branch': self.tagsbranch}
182 182 ctx = context.memctx(self.repo, (tagparent, None), "update tags",
183 183 [".hgtags"], getfilectx, "convert-repo", date,
184 184 extra)
185 185 self.repo.commitctx(ctx)
186 186 return hex(self.repo.changelog.tip())
187 187
188 188 def setfilemapmode(self, active):
189 189 self.filemapmode = active
190 190
191 191 class mercurial_source(converter_source):
192 192 def __init__(self, ui, path, rev=None):
193 193 converter_source.__init__(self, ui, path, rev)
194 194 self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
195 195 self.ignored = {}
196 196 self.saverev = ui.configbool('convert', 'hg.saverev', False)
197 197 try:
198 198 self.repo = hg.repository(self.ui, path)
199 199 # try to provoke an exception if this isn't really a hg
200 200 # repo, but some other bogus compatible-looking url
201 201 if not self.repo.local():
202 202 raise error.RepoError()
203 203 except error.RepoError:
204 204 ui.print_exc()
205 205 raise NoRepo("%s is not a local Mercurial repo" % path)
206 206 self.lastrev = None
207 207 self.lastctx = None
208 208 self._changescache = None
209 209 self.convertfp = None
210 210 # Restrict converted revisions to startrev descendants
211 211 startnode = ui.config('convert', 'hg.startrev')
212 212 if startnode is not None:
213 213 try:
214 214 startnode = self.repo.lookup(startnode)
215 215 except error.RepoError:
216 216 raise util.Abort(_('%s is not a valid start revision')
217 217 % startnode)
218 218 startrev = self.repo.changelog.rev(startnode)
219 219 children = {startnode: 1}
220 220 for rev in self.repo.changelog.descendants(startrev):
221 221 children[self.repo.changelog.node(rev)] = 1
222 222 self.keep = children.__contains__
223 223 else:
224 224 self.keep = util.always
225 225
226 226 def changectx(self, rev):
227 227 if self.lastrev != rev:
228 228 self.lastctx = self.repo[rev]
229 229 self.lastrev = rev
230 230 return self.lastctx
231 231
232 232 def parents(self, ctx):
233 233 return [p.node() for p in ctx.parents()
234 234 if p and self.keep(p.node())]
235 235
236 236 def getheads(self):
237 237 if self.rev:
238 238 heads = [self.repo[self.rev].node()]
239 239 else:
240 240 heads = self.repo.heads()
241 241 return [hex(h) for h in heads if self.keep(h)]
242 242
243 243 def getfile(self, name, rev):
244 244 try:
245 245 return self.changectx(rev)[name].data()
246 246 except error.LookupError, err:
247 247 raise IOError(err)
248 248
249 249 def getmode(self, name, rev):
250 250 return self.changectx(rev).manifest().flags(name)
251 251
252 252 def getchanges(self, rev):
253 253 ctx = self.changectx(rev)
254 254 parents = self.parents(ctx)
255 255 if not parents:
256 256 files = util.sort(ctx.manifest().keys())
257 257 if self.ignoreerrors:
258 258 # calling getcopies() is a simple way to detect missing
259 259 # revlogs and populate self.ignored
260 260 self.getcopies(ctx, files)
261 261 return [(f, rev) for f in files if f not in self.ignored], {}
262 262 if self._changescache and self._changescache[0] == rev:
263 263 m, a, r = self._changescache[1]
264 264 else:
265 265 m, a, r = self.repo.status(parents[0], ctx.node())[:3]
266 266 # getcopies() detects missing revlogs early, run it before
267 267 # filtering the changes.
268 268 copies = self.getcopies(ctx, m + a)
269 269 changes = [(name, rev) for name in m + a + r
270 270 if name not in self.ignored]
271 271 return util.sort(changes), copies
272 272
273 273 def getcopies(self, ctx, files):
274 274 copies = {}
275 275 for name in files:
276 276 if name in self.ignored:
277 277 continue
278 278 try:
279 279 copysource, copynode = ctx.filectx(name).renamed()
280 280 if copysource in self.ignored or not self.keep(copynode):
281 281 continue
282 282 copies[name] = copysource
283 283 except TypeError:
284 284 pass
285 285 except error.LookupError, e:
286 286 if not self.ignoreerrors:
287 287 raise
288 288 self.ignored[name] = 1
289 289 self.ui.warn(_('ignoring: %s\n') % e)
290 290 return copies
291 291
292 292 def getcommit(self, rev):
293 293 ctx = self.changectx(rev)
294 294 parents = [hex(p) for p in self.parents(ctx)]
295 295 if self.saverev:
296 296 crev = rev
297 297 else:
298 298 crev = None
299 299 return commit(author=ctx.user(), date=util.datestr(ctx.date()),
300 300 desc=ctx.description(), rev=crev, parents=parents,
301 301 branch=ctx.branch(), extra=ctx.extra())
302 302
303 303 def gettags(self):
304 304 tags = [t for t in self.repo.tagslist() if t[0] != 'tip']
305 305 return dict([(name, hex(node)) for name, node in tags
306 306 if self.keep(node)])
307 307
308 308 def getchangedfiles(self, rev, i):
309 309 ctx = self.changectx(rev)
310 310 parents = self.parents(ctx)
311 311 if not parents and i is None:
312 312 i = 0
313 313 changes = [], ctx.manifest().keys(), []
314 314 else:
315 315 i = i or 0
316 316 changes = self.repo.status(parents[i], ctx.node())[:3]
317 317 changes = [[f for f in l if f not in self.ignored] for l in changes]
318 318
319 319 if i == 0:
320 320 self._changescache = (rev, changes)
321 321
322 322 return changes[0] + changes[1] + changes[2]
323 323
324 324 def converted(self, rev, destrev):
325 325 if self.convertfp is None:
326 326 self.convertfp = open(os.path.join(self.path, '.hg', 'shamap'),
327 327 'a')
328 328 self.convertfp.write('%s %s\n' % (destrev, rev))
329 329 self.convertfp.flush()
330 330
331 331 def before(self):
332 332 self.ui.debug(_('run hg source pre-conversion action\n'))
333 333
334 334 def after(self):
335 335 self.ui.debug(_('run hg source post-conversion action\n'))
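putcommit above folds an N-parent (octopus) source commit into a chain of two-parent merges, because Mercurial changesets allow at most two parents. A standalone sketch of that reduction; commit_pair is a hypothetical stand-in for calling repo.commitctx and reading back the new tip:

    NULLID = "0" * 40  # null parent, as in putcommit

    def fold_parents(parents, commit_pair):
        # Pad to two parents (mirroring the two append lines in
        # putcommit), then merge pairwise, each step building on the
        # previously committed tip.
        parents = list(parents)
        while len(parents) < 2:
            parents.append(NULLID)
        p2 = parents.pop(0)
        tip = None
        while parents:
            p1, p2 = p2, parents.pop(0)
            tip = commit_pair(p1, p2)  # stands in for repo.commitctx(ctx)
            p2 = tip
        return tip

    merges = []
    def commit_pair(p1, p2):
        merges.append((p1, p2))
        return "m%d" % len(merges)

    fold_parents(["a", "b", "c"], commit_pair)
    assert merges == [("a", "b"), ("m1", "c")]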
@@ -1,1167 +1,1167 @@ hgext/convert/subversion.py
1 1 # Subversion 1.4/1.5 Python API backend
2 2 #
3 3 # Copyright(C) 2007 Daniel Holth et al
4 4 #
5 5 # Configuration options:
6 6 #
7 7 # convert.svn.trunk
8 8 # Relative path to the trunk (default: "trunk")
9 9 # convert.svn.branches
10 10 # Relative path to tree of branches (default: "branches")
11 11 # convert.svn.tags
12 12 # Relative path to tree of tags (default: "tags")
13 13 #
14 14 # Set these in a hgrc, or on the command line as follows:
15 15 #
16 16 # hg convert --config convert.svn.trunk=wackoname [...]
17 17
18 18 import locale
19 19 import os
20 20 import re
21 21 import sys
22 22 import cPickle as pickle
23 23 import tempfile
24 24 import urllib
25 25
26 26 from mercurial import strutil, util
27 27 from mercurial.i18n import _
28 28
29 29 # Subversion stuff. Works best with very recent Python SVN bindings
30 30 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
31 31 # these bindings.
32 32
33 33 from cStringIO import StringIO
34 34
35 35 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
36 36 from common import commandline, converter_source, converter_sink, mapfile
37 37
38 38 try:
39 39 from svn.core import SubversionException, Pool
40 40 import svn
41 41 import svn.client
42 42 import svn.core
43 43 import svn.ra
44 44 import svn.delta
45 45 import transport
46 46 except ImportError:
47 47 pass
48 48
49 49 class SvnPathNotFound(Exception):
50 50 pass
51 51
52 52 def geturl(path):
53 53 try:
54 54 return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
55 55 except SubversionException:
56 56 pass
57 57 if os.path.isdir(path):
58 58 path = os.path.normpath(os.path.abspath(path))
59 59 if os.name == 'nt':
60 60 path = '/' + util.normpath(path)
61 61 return 'file://%s' % urllib.quote(path)
62 62 return path
63 63
64 64 def optrev(number):
65 65 optrev = svn.core.svn_opt_revision_t()
66 66 optrev.kind = svn.core.svn_opt_revision_number
67 67 optrev.value.number = number
68 68 return optrev
69 69
70 70 class changedpath(object):
71 71 def __init__(self, p):
72 72 self.copyfrom_path = p.copyfrom_path
73 73 self.copyfrom_rev = p.copyfrom_rev
74 74 self.action = p.action
75 75
76 76 def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
77 77 strict_node_history=False):
78 78 protocol = -1
79 79 def receiver(orig_paths, revnum, author, date, message, pool):
80 80 if orig_paths is not None:
81 81 for k, v in orig_paths.iteritems():
82 82 orig_paths[k] = changedpath(v)
83 83 pickle.dump((orig_paths, revnum, author, date, message),
84 84 fp, protocol)
85 85
86 86 try:
87 87 # Use an ra of our own so that our parent can consume
88 88 # our results without confusing the server.
89 89 t = transport.SvnRaTransport(url=url)
90 90 svn.ra.get_log(t.ra, paths, start, end, limit,
91 91 discover_changed_paths,
92 92 strict_node_history,
93 93 receiver)
94 94 except SubversionException, (inst, num):
95 95 pickle.dump(num, fp, protocol)
96 96 except IOError:
97 97 # Caller may interrupt the iteration
98 98 pickle.dump(None, fp, protocol)
99 99 else:
100 100 pickle.dump(None, fp, protocol)
101 101 fp.close()
102 102 # With large history, cleanup process goes crazy and suddenly
103 103 # consumes *huge* amount of memory. The output file being closed,
104 104 # there is no need for clean termination.
105 105 os._exit(0)
106 106
107 107 def debugsvnlog(ui, **opts):
108 108 """Fetch the SVN log in a subprocess and channel it back to the parent to
109 109 avoid memory collection issues.
110 110 """
111 111 util.set_binary(sys.stdin)
112 112 util.set_binary(sys.stdout)
113 113 args = decodeargs(sys.stdin.read())
114 114 get_log_child(sys.stdout, *args)
115 115
116 116 class logstream:
117 117 """Interruptible revision log iterator."""
118 118 def __init__(self, stdout):
119 119 self._stdout = stdout
120 120
121 121 def __iter__(self):
122 122 while True:
123 123 entry = pickle.load(self._stdout)
124 124 try:
125 125 orig_paths, revnum, author, date, message = entry
126 126 except:
127 127 if entry is None:
128 128 break
129 129 raise SubversionException("child raised exception", entry)
130 130 yield entry
131 131
132 132 def close(self):
133 133 if self._stdout:
134 134 self._stdout.close()
135 135 self._stdout = None
136 136
137 137 # SVN conversion code stolen from bzr-svn and tailor
138 138 #
139 139 # Subversion looks like a versioned filesystem; branch structures
140 140 # are defined by convention and not enforced by the tool. First,
141 141 # we define the potential branches (modules) as "trunk" and "branches"
142 142 # children directories. Revisions are then identified by their
143 143 # module and revision number (and a repository identifier).
144 144 #
145 145 # The revision graph is really a tree (or a forest). By default, a
146 146 # revision parent is the previous revision in the same module. If the
147 147 # module directory is copied/moved from another module then the
148 148 # revision is the module root and its parent the source revision in
149 149 # the parent module. A revision has at most one parent.
150 150 #
151 151 class svn_source(converter_source):
152 152 def __init__(self, ui, url, rev=None):
153 153 super(svn_source, self).__init__(ui, url, rev=rev)
154 154
155 155 try:
156 156 SubversionException
157 157 except NameError:
158 158 raise MissingTool(_('Subversion python bindings could not be loaded'))
159 159
160 160 try:
161 161 version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
162 162 if version < (1, 4):
163 163 raise MissingTool(_('Subversion python bindings %d.%d found, '
164 164 '1.4 or later required') % version)
165 165 except AttributeError:
166 166 raise MissingTool(_('Subversion python bindings are too old, 1.4 '
167 167 'or later required'))
168 168
169 169 self.encoding = locale.getpreferredencoding()
170 170 self.lastrevs = {}
171 171
172 172 latest = None
173 173 try:
174 174 # Support file://path@rev syntax. Useful e.g. to convert
175 175 # deleted branches.
176 176 at = url.rfind('@')
177 177 if at >= 0:
178 178 latest = int(url[at+1:])
179 179 url = url[:at]
180 except ValueError, e:
180 except ValueError:
181 181 pass
182 182 self.url = geturl(url)
183 183 self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
184 184 try:
185 185 self.transport = transport.SvnRaTransport(url=self.url)
186 186 self.ra = self.transport.ra
187 187 self.ctx = self.transport.client
188 188 self.baseurl = svn.ra.get_repos_root(self.ra)
189 189 # Module is either empty or a repository path starting with
190 190 # a slash and not ending with a slash.
191 191 self.module = urllib.unquote(self.url[len(self.baseurl):])
192 192 self.prevmodule = None
193 193 self.rootmodule = self.module
194 194 self.commits = {}
195 195 self.paths = {}
196 196 self.uuid = svn.ra.get_uuid(self.ra).decode(self.encoding)
197 197 except SubversionException, e:
198 198 ui.print_exc()
199 199 raise NoRepo("%s does not look like a Subversion repo" % self.url)
200 200
201 201 if rev:
202 202 try:
203 203 latest = int(rev)
204 204 except ValueError:
205 205 raise util.Abort(_('svn: revision %s is not an integer') % rev)
206 206
207 207 self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
208 208 try:
209 209 self.startrev = int(self.startrev)
210 210 if self.startrev < 0:
211 211 self.startrev = 0
212 212 except ValueError:
213 213 raise util.Abort(_('svn: start revision %s is not an integer')
214 214 % self.startrev)
215 215
216 216 try:
217 217 self.get_blacklist()
218 218 except IOError, e:
219 219 pass
220 220
221 221 self.head = self.latest(self.module, latest)
222 222 if not self.head:
223 223 raise util.Abort(_('no revision found in module %s') %
224 224 self.module.encode(self.encoding))
225 225 self.last_changed = self.revnum(self.head)
226 226
227 227 self._changescache = None
228 228
229 229 if os.path.exists(os.path.join(url, '.svn/entries')):
230 230 self.wc = url
231 231 else:
232 232 self.wc = None
233 233 self.convertfp = None
234 234
235 235 def setrevmap(self, revmap):
236 236 lastrevs = {}
237 237 for revid in revmap.iterkeys():
238 238 uuid, module, revnum = self.revsplit(revid)
239 239 lastrevnum = lastrevs.setdefault(module, revnum)
240 240 if revnum > lastrevnum:
241 241 lastrevs[module] = revnum
242 242 self.lastrevs = lastrevs
243 243
244 244 def exists(self, path, optrev):
245 245 try:
246 246 svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
247 247 optrev, False, self.ctx)
248 248 return True
249 249 except SubversionException, err:
250 250 return False
251 251
252 252 def getheads(self):
253 253
254 254 def isdir(path, revnum):
255 255 kind = self._checkpath(path, revnum)
256 256 return kind == svn.core.svn_node_dir
257 257
258 258 def getcfgpath(name, rev):
259 259 cfgpath = self.ui.config('convert', 'svn.' + name)
260 260 if cfgpath is not None and cfgpath.strip() == '':
261 261 return None
262 262 path = (cfgpath or name).strip('/')
263 263 if not self.exists(path, rev):
264 264 if cfgpath:
265 265 raise util.Abort(_('expected %s to be at %r, but not found')
266 266 % (name, path))
267 267 return None
268 268 self.ui.note(_('found %s at %r\n') % (name, path))
269 269 return path
270 270
271 271 rev = optrev(self.last_changed)
272 272 oldmodule = ''
273 273 trunk = getcfgpath('trunk', rev)
274 274 self.tags = getcfgpath('tags', rev)
275 275 branches = getcfgpath('branches', rev)
276 276
277 277 # If the project has a trunk or branches, we will extract heads
278 278 # from them. We keep the project root otherwise.
279 279 if trunk:
280 280 oldmodule = self.module or ''
281 281 self.module += '/' + trunk
282 282 self.head = self.latest(self.module, self.last_changed)
283 283 if not self.head:
284 284 raise util.Abort(_('no revision found in module %s') %
285 285 self.module.encode(self.encoding))
286 286
287 287 # First head in the list is the module's head
288 288 self.heads = [self.head]
289 289 if self.tags is not None:
290 290 self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))
291 291
292 292 # Check if branches bring a few more heads to the list
293 293 if branches:
294 294 rpath = self.url.strip('/')
295 295 branchnames = svn.client.ls(rpath + '/' + urllib.quote(branches),
296 296 rev, False, self.ctx)
297 297 for branch in branchnames.keys():
298 298 module = '%s/%s/%s' % (oldmodule, branches, branch)
299 299 if not isdir(module, self.last_changed):
300 300 continue
301 301 brevid = self.latest(module, self.last_changed)
302 302 if not brevid:
303 303 self.ui.note(_('ignoring empty branch %s\n') %
304 304 branch.encode(self.encoding))
305 305 continue
306 306 self.ui.note(_('found branch %s at %d\n') %
307 307 (branch, self.revnum(brevid)))
308 308 self.heads.append(brevid)
309 309
310 310 if self.startrev and self.heads:
311 311 if len(self.heads) > 1:
312 312 raise util.Abort(_('svn: start revision is not supported '
313 313 'with more than one branch'))
314 314 revnum = self.revnum(self.heads[0])
315 315 if revnum < self.startrev:
316 316 raise util.Abort(_('svn: no revision found after start revision %d')
317 317 % self.startrev)
318 318
319 319 return self.heads
320 320
321 321 def getfile(self, file, rev):
322 322 data, mode = self._getfile(file, rev)
323 323 self.modecache[(file, rev)] = mode
324 324 return data
325 325
326 326 def getmode(self, file, rev):
327 327 return self.modecache[(file, rev)]
328 328
329 329 def getchanges(self, rev):
330 330 if self._changescache and self._changescache[0] == rev:
331 331 return self._changescache[1]
332 332 self._changescache = None
333 333 self.modecache = {}
334 334 (paths, parents) = self.paths[rev]
335 335 if parents:
336 336 files, copies = self.expandpaths(rev, paths, parents)
337 337 else:
338 338 # Perform a full checkout on roots
339 339 uuid, module, revnum = self.revsplit(rev)
340 340 entries = svn.client.ls(self.baseurl + urllib.quote(module),
341 341 optrev(revnum), True, self.ctx)
342 342 files = [n for n,e in entries.iteritems()
343 343 if e.kind == svn.core.svn_node_file]
344 344 copies = {}
345 345
346 346 files.sort()
347 347 files = zip(files, [rev] * len(files))
348 348
349 349 # caller caches the result, so free it here to release memory
350 350 del self.paths[rev]
351 351 return (files, copies)
352 352
353 353 def getchangedfiles(self, rev, i):
354 354 changes = self.getchanges(rev)
355 355 self._changescache = (rev, changes)
356 356 return [f[0] for f in changes[0]]
357 357
358 358 def getcommit(self, rev):
359 359 if rev not in self.commits:
360 360 uuid, module, revnum = self.revsplit(rev)
361 361 self.module = module
362 362 self.reparent(module)
363 363 # We assume that:
364 364 # - requests for revisions after "stop" come from the
365 365 # revision graph backward traversal. Cache all of them
366 366 # down to stop, they will be used eventually.
367 367 # - requests for revisions before "stop" come to get
368 368 # isolated branches parents. Just fetch what is needed.
369 369 stop = self.lastrevs.get(module, 0)
370 370 if revnum < stop:
371 371 stop = revnum + 1
372 372 self._fetch_revisions(revnum, stop)
373 373 commit = self.commits[rev]
374 374 # caller caches the result, so free it here to release memory
375 375 del self.commits[rev]
376 376 return commit
377 377
378 378 def gettags(self):
379 379 tags = {}
380 380 if self.tags is None:
381 381 return tags
382 382
383 383 # svn tags are just a convention, project branches left in a
384 384 # 'tags' directory. There is no other relationship than
385 385 # ancestry, which is expensive to discover and makes them hard
386 386 # to update incrementally. Worse, past revisions may be
387 387 # referenced by tags far away in the future, requiring a deep
388 388 # history traversal on every calculation. Current code
389 389 # performs a single backward traversal, tracking moves within
390 390 # the tags directory (tag renaming) and recording a new tag
391 391 # every time a project is copied from outside the tags
392 392 # directory. It also lists deleted tags; this behaviour may
393 393 # change in the future.
394 394 pendings = []
395 395 tagspath = self.tags
396 396 start = svn.ra.get_latest_revnum(self.ra)
397 397 try:
398 398 for entry in self._getlog([self.tags], start, self.startrev):
399 399 origpaths, revnum, author, date, message = entry
400 400 copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
401 401 in origpaths.iteritems() if e.copyfrom_path]
402 402 copies.sort()
403 403 # Apply moves/copies from more specific to general
404 404 copies.reverse()
405 405
406 406 srctagspath = tagspath
407 407 if copies and copies[-1][2] == tagspath:
408 408 # Track tags directory moves
409 409 srctagspath = copies.pop()[0]
410 410
411 411 for source, sourcerev, dest in copies:
412 412 if not dest.startswith(tagspath + '/'):
413 413 continue
414 414 for tag in pendings:
415 415 if tag[0].startswith(dest):
416 416 tagpath = source + tag[0][len(dest):]
417 417 tag[:2] = [tagpath, sourcerev]
418 418 break
419 419 else:
420 420 pendings.append([source, sourcerev, dest.split('/')[-1]])
421 421
422 422 # Tell tag renamings from tag creations
423 423 remainings = []
424 424 for source, sourcerev, tagname in pendings:
425 425 if source.startswith(srctagspath):
426 426 remainings.append([source, sourcerev, tagname])
427 427 continue
428 428 # From revision may be fake, get one with changes
429 429 try:
430 430 tagid = self.latest(source, sourcerev)
431 431 if tagid:
432 432 tags[tagname] = tagid
433 433 except SvnPathNotFound:
434 434 # It happens when we are following directories we assumed
435 435 # were copied with their parents but were really created
436 436 # in the tag directory.
437 437 pass
438 438 pendings = remainings
439 439 tagspath = srctagspath
440 440
441 441 except SubversionException, (inst, num):
442 442 self.ui.note(_('no tags found at revision %d\n') % start)
443 443 return tags
444 444
445 445 def converted(self, rev, destrev):
446 446 if not self.wc:
447 447 return
448 448 if self.convertfp is None:
449 449 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
450 450 'a')
451 451 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
452 452 self.convertfp.flush()
453 453
454 454 # -- helper functions --
455 455
456 456 def revid(self, revnum, module=None):
457 457 if not module:
458 458 module = self.module
459 459 return u"svn:%s%s@%s" % (self.uuid, module.decode(self.encoding),
460 460 revnum)
461 461
462 462 def revnum(self, rev):
463 463 return int(rev.split('@')[-1])
464 464
465 465 def revsplit(self, rev):
466 466 url, revnum = strutil.rsplit(rev.encode(self.encoding), '@', 1)
467 467 revnum = int(revnum)
468 468 parts = url.split('/', 1)
469 469 uuid = parts.pop(0)[4:]
470 470 mod = ''
471 471 if parts:
472 472 mod = '/' + parts[0]
473 473 return uuid, mod, revnum
474 474
475 475 def latest(self, path, stop=0):
476 476 """Find the latest revid affecting path, up to stop. It may return
477 477 a revision in a different module, since a branch may be moved without
478 478 a change being reported. Return None if computed module does not
479 479 belong to rootmodule subtree.
480 480 """
481 481 if not path.startswith(self.rootmodule):
482 482 # Requests on foreign branches may be forbidden at server level
483 483 self.ui.debug(_('ignoring foreign branch %r\n') % path)
484 484 return None
485 485
486 486 if not stop:
487 487 stop = svn.ra.get_latest_revnum(self.ra)
488 488 try:
489 489 prevmodule = self.reparent('')
490 490 dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
491 491 self.reparent(prevmodule)
492 492 except SubversionException:
493 493 dirent = None
494 494 if not dirent:
495 495 raise SvnPathNotFound(_('%s not found up to revision %d') % (path, stop))
496 496
497 497 # stat() gives us the previous revision on this line of development, but
498 498 # it might be in *another module*. Fetch the log and detect renames down
499 499 # to the latest revision.
500 500 stream = self._getlog([path], stop, dirent.created_rev)
501 501 try:
502 502 for entry in stream:
503 503 paths, revnum, author, date, message = entry
504 504 if revnum <= dirent.created_rev:
505 505 break
506 506
507 507 for p in paths:
508 508 if not path.startswith(p) or not paths[p].copyfrom_path:
509 509 continue
510 510 newpath = paths[p].copyfrom_path + path[len(p):]
511 511 self.ui.debug(_("branch renamed from %s to %s at %d\n") %
512 512 (path, newpath, revnum))
513 513 path = newpath
514 514 break
515 515 finally:
516 516 stream.close()
517 517
518 518 if not path.startswith(self.rootmodule):
519 519 self.ui.debug(_('ignoring foreign branch %r\n') % path)
520 520 return None
521 521 return self.revid(dirent.created_rev, path)
522 522
523 523 def get_blacklist(self):
524 524 """Avoid certain revision numbers.
525 525 It is not uncommon for two nearby revisions to cancel each other
526 526 out, e.g. 'I copied trunk into a subdirectory of itself instead
527 527 of making a branch'. The converted repository is significantly
528 528 smaller if we ignore such revisions."""
529 529 self.blacklist = util.set()
530 530 blacklist = self.blacklist
531 531 for line in file("blacklist.txt", "r"):
532 532 if not line.startswith("#"):
533 533 try:
534 534 svn_rev = int(line.strip())
535 535 blacklist.add(svn_rev)
536 536 except ValueError, e:
537 537 pass # not an integer or a comment
538 538
539 539 def is_blacklisted(self, svn_rev):
540 540 return svn_rev in self.blacklist
541 541
542 542 def reparent(self, module):
543 543 """Reparent the svn transport and return the previous parent."""
544 544 if self.prevmodule == module:
545 545 return module
546 546 svnurl = self.baseurl + urllib.quote(module)
547 547 prevmodule = self.prevmodule
548 548 if prevmodule is None:
549 549 prevmodule = ''
550 550 self.ui.debug(_("reparent to %s\n") % svnurl)
551 551 svn.ra.reparent(self.ra, svnurl)
552 552 self.prevmodule = module
553 553 return prevmodule
554 554
555 555 def expandpaths(self, rev, paths, parents):
556 556 entries = []
557 557 copyfrom = {} # Map of entrypath, revision for finding source of deleted revisions.
558 558 copies = {}
559 559
560 560 new_module, revnum = self.revsplit(rev)[1:]
561 561 if new_module != self.module:
562 562 self.module = new_module
563 563 self.reparent(self.module)
564 564
565 565 for path, ent in paths:
566 566 entrypath = self.getrelpath(path)
567 567 entry = entrypath.decode(self.encoding)
568 568
569 569 kind = self._checkpath(entrypath, revnum)
570 570 if kind == svn.core.svn_node_file:
571 571 entries.append(self.recode(entry))
572 572 if not ent.copyfrom_path or not parents:
573 573 continue
574 574 # Copy sources not in parent revisions cannot be represented,
575 575 # ignore their origin for now
576 576 pmodule, prevnum = self.revsplit(parents[0])[1:]
577 577 if ent.copyfrom_rev < prevnum:
578 578 continue
579 579 copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
580 580 if not copyfrom_path:
581 581 continue
582 582 self.ui.debug(_("copied to %s from %s@%s\n") %
583 583 (entrypath, copyfrom_path, ent.copyfrom_rev))
584 584 copies[self.recode(entry)] = self.recode(copyfrom_path)
585 585 elif kind == 0: # gone, but had better be a deleted *file*
586 586 self.ui.debug(_("gone from %s\n") % ent.copyfrom_rev)
587 587
588 588 # if a branch is created but entries are removed in the same
589 589 # changeset, get the right fromrev
590 590 # parents cannot be empty here, you cannot remove things from
591 591 # a root revision.
592 592 uuid, old_module, fromrev = self.revsplit(parents[0])
593 593
594 594 basepath = old_module + "/" + self.getrelpath(path)
595 595 entrypath = basepath
596 596
597 597 def lookup_parts(p):
598 598 rc = None
599 599 parts = p.split("/")
600 600 for i in range(len(parts)):
601 601 part = "/".join(parts[:i])
602 602 info = part, copyfrom.get(part, None)
603 603 if info[1] is not None:
604 604 self.ui.debug(_("found parent directory %s\n") % info[1])
605 605 rc = info
606 606 return rc
607 607
608 608 self.ui.debug(_("base, entry %s %s\n") % (basepath, entrypath))
609 609
610 610 frompath, froment = lookup_parts(entrypath) or (None, revnum - 1)
611 611
612 612 # need to remove fragment from lookup_parts and replace with copyfrom_path
613 613 if frompath is not None:
614 614 self.ui.debug(_("munge-o-matic\n"))
615 615 self.ui.debug(entrypath + '\n')
616 616 self.ui.debug(entrypath[len(frompath):] + '\n')
617 617 entrypath = froment.copyfrom_path + entrypath[len(frompath):]
618 618 fromrev = froment.copyfrom_rev
619 619 self.ui.debug(_("info: %s %s %s %s\n") % (frompath, froment, ent, entrypath))
620 620
621 621 # We can avoid the reparent calls if the module has not changed
622 622 # but it is probably not worth the pain.
623 623 prevmodule = self.reparent('')
624 624 fromkind = svn.ra.check_path(self.ra, entrypath.strip('/'), fromrev)
625 625 self.reparent(prevmodule)
626 626
627 627 if fromkind == svn.core.svn_node_file: # a deleted file
628 628 entries.append(self.recode(entry))
629 629 elif fromkind == svn.core.svn_node_dir:
630 630 # print "Deleted/moved non-file:", revnum, path, ent
631 631 # children = self._find_children(path, revnum - 1)
632 632 # print "find children %s@%d from %d action %s" % (path, revnum, ent.copyfrom_rev, ent.action)
633 633 # Sometimes this is tricky. For example: in
634 634 # The Subversion Repository revision 6940 a dir
635 635 # was copied and one of its files was deleted
636 636 # from the new location in the same commit. This
637 637 # code can't deal with that yet.
638 638 if ent.action == 'C':
639 639 children = self._find_children(path, fromrev)
640 640 else:
641 641 oroot = entrypath.strip('/')
642 642 nroot = path.strip('/')
643 643 children = self._find_children(oroot, fromrev)
644 644 children = [s.replace(oroot,nroot) for s in children]
645 645 # Mark all [files, not directories] as deleted.
646 646 for child in children:
647 647 # Can we move a child directory and its
648 648 # parent in the same commit? (probably can). Could
649 649 # cause problems if instead of revnum -1,
650 650 # we have to look in (copyfrom_path, revnum - 1)
651 651 entrypath = self.getrelpath("/" + child, module=old_module)
652 652 if entrypath:
653 653 entry = self.recode(entrypath.decode(self.encoding))
654 654 if entry in copies:
655 655 # deleted file within a copy
656 656 del copies[entry]
657 657 else:
658 658 entries.append(entry)
659 659 else:
660 660 self.ui.debug(_('unknown path in revision %d: %s\n') % \
661 661 (revnum, path))
662 662 elif kind == svn.core.svn_node_dir:
663 663 # Should probably synthesize normal file entries
664 664 # and handle as above to clean up copy/rename handling.
665 665
666 666 # If the directory just had a prop change,
667 667 # then we shouldn't need to look for its children.
668 668 if ent.action == 'M':
669 669 continue
670 670
671 671 # Also this could create duplicate entries. Not sure
672 672 # whether this will matter. Maybe should make entries a set.
673 673 # print "Changed directory", revnum, path, ent.action, ent.copyfrom_path, ent.copyfrom_rev
674 674 # This will fail if a directory was copied
675 675 # from another branch and then some of its files
676 676 # were deleted in the same transaction.
677 677 children = util.sort(self._find_children(path, revnum))
678 678 for child in children:
679 679 # Can we move a child directory and its
680 680 # parent in the same commit? (probably can). Could
681 681 # cause problems if instead of revnum -1,
682 682 # we have to look in (copyfrom_path, revnum - 1)
683 683 entrypath = self.getrelpath("/" + child)
684 684 # print child, self.module, entrypath
685 685 if entrypath:
686 686 # Need to filter out directories here...
687 687 kind = self._checkpath(entrypath, revnum)
688 688 if kind != svn.core.svn_node_dir:
689 689 entries.append(self.recode(entrypath))
690 690
691 691 # Copies here (must copy all from source)
692 692 # Probably not a real problem for us if
693 693 # source does not exist
694 694 if not ent.copyfrom_path or not parents:
695 695 continue
696 696 # Copy sources not in parent revisions cannot be represented,
697 697 # ignore their origin for now
698 698 pmodule, prevnum = self.revsplit(parents[0])[1:]
699 699 if ent.copyfrom_rev < prevnum:
700 700 continue
701 701 copyfrompath = ent.copyfrom_path.decode(self.encoding)
702 702 copyfrompath = self.getrelpath(copyfrompath, pmodule)
703 703 if not copyfrompath:
704 704 continue
705 705 copyfrom[path] = ent
706 706 self.ui.debug(_("mark %s came from %s:%d\n")
707 707 % (path, copyfrompath, ent.copyfrom_rev))
708 708 children = self._find_children(ent.copyfrom_path, ent.copyfrom_rev)
709 709 children.sort()
710 710 for child in children:
711 711 entrypath = self.getrelpath("/" + child, pmodule)
712 712 if not entrypath:
713 713 continue
714 714 entry = entrypath.decode(self.encoding)
715 715 copytopath = path + entry[len(copyfrompath):]
716 716 copytopath = self.getrelpath(copytopath)
717 717 copies[self.recode(copytopath)] = self.recode(entry, pmodule)
718 718
719 719 return (util.unique(entries), copies)
720 720
721 721 def _fetch_revisions(self, from_revnum, to_revnum):
722 722 if from_revnum < to_revnum:
723 723 from_revnum, to_revnum = to_revnum, from_revnum
724 724
725 725 self.child_cset = None
726 726
727 727 def parselogentry(orig_paths, revnum, author, date, message):
728 728 """Return the parsed commit object or None, and True if
729 729 the revision is a branch root.
730 730 """
731 731 self.ui.debug(_("parsing revision %d (%d changes)\n") %
732 732 (revnum, len(orig_paths)))
733 733
734 734 branched = False
735 735 rev = self.revid(revnum)
736 736 # branch log might return entries for a parent we already have
737 737
738 738 if (rev in self.commits or revnum < to_revnum):
739 739 return None, branched
740 740
741 741 parents = []
742 742 # check whether this revision is the start of a branch or part
743 743 # of a branch renaming
744 744 orig_paths = util.sort(orig_paths.items())
745 745 root_paths = [(p,e) for p,e in orig_paths if self.module.startswith(p)]
746 746 if root_paths:
747 747 path, ent = root_paths[-1]
748 748 if ent.copyfrom_path:
749 749 branched = True
750 750 newpath = ent.copyfrom_path + self.module[len(path):]
751 751 # ent.copyfrom_rev may not be the actual last revision
752 752 previd = self.latest(newpath, ent.copyfrom_rev)
753 753 if previd is not None:
754 754 prevmodule, prevnum = self.revsplit(previd)[1:]
755 755 if prevnum >= self.startrev:
756 756 parents = [previd]
757 757 self.ui.note(_('found parent of branch %s at %d: %s\n') %
758 758 (self.module, prevnum, prevmodule))
759 759 else:
760 760 self.ui.debug(_("no copyfrom path, don't know what to do.\n"))
761 761
762 762 paths = []
763 763 # filter out unrelated paths
764 764 for path, ent in orig_paths:
765 765 if self.getrelpath(path) is None:
766 766 continue
767 767 paths.append((path, ent))
768 768
769 769 # Example SVN datetime. Includes microseconds.
770 770 # ISO-8601 conformant
771 771 # '2007-01-04T17:35:00.902377Z'
772 772 date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
773 773
774 774 log = message and self.recode(message) or ''
775 775 author = author and self.recode(author) or ''
776 776 try:
777 777 branch = self.module.split("/")[-1]
778 778 if branch == 'trunk':
779 779 branch = ''
780 780 except IndexError:
781 781 branch = None
782 782
783 783 cset = commit(author=author,
784 784 date=util.datestr(date),
785 785 desc=log,
786 786 parents=parents,
787 787 branch=branch,
788 788 rev=rev.encode('utf-8'))
789 789
790 790 self.commits[rev] = cset
791 791 # The parents list is *shared* among self.paths and the
792 792 # commit object. Both will be updated below.
793 793 self.paths[rev] = (paths, cset.parents)
794 794 if self.child_cset and not self.child_cset.parents:
795 795 self.child_cset.parents[:] = [rev]
796 796 self.child_cset = cset
797 797 return cset, branched
798 798
799 799 self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
800 800 (self.module, from_revnum, to_revnum))
801 801
802 802 try:
803 803 firstcset = None
804 804 lastonbranch = False
805 805 stream = self._getlog([self.module], from_revnum, to_revnum)
806 806 try:
807 807 for entry in stream:
808 808 paths, revnum, author, date, message = entry
809 809 if revnum < self.startrev:
810 810 lastonbranch = True
811 811 break
812 812 if self.is_blacklisted(revnum):
813 813 self.ui.note(_('skipping blacklisted revision %d\n')
814 814 % revnum)
815 815 continue
816 816 if paths is None:
817 817 self.ui.debug(_('revision %d has no entries\n') % revnum)
818 818 continue
819 819 cset, lastonbranch = parselogentry(paths, revnum, author,
820 820 date, message)
821 821 if cset:
822 822 firstcset = cset
823 823 if lastonbranch:
824 824 break
825 825 finally:
826 826 stream.close()
827 827
828 828 if not lastonbranch and firstcset and not firstcset.parents:
829 829 # The first revision of the sequence (the last fetched one)
830 830 # has invalid parents if not a branch root. Find the parent
831 831 # revision now, if any.
832 832 try:
833 833 firstrevnum = self.revnum(firstcset.rev)
834 834 if firstrevnum > 1:
835 835 latest = self.latest(self.module, firstrevnum - 1)
836 836 if latest:
837 837 firstcset.parents.append(latest)
838 838 except SvnPathNotFound:
839 839 pass
840 840 except SubversionException, (inst, num):
841 841 if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
842 842 raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
843 843 raise
844 844
845 845 def _getfile(self, file, rev):
846 846 # TODO: ra.get_file transmits the whole file instead of diffs.
847 847 mode = ''
848 848 try:
849 849 new_module, revnum = self.revsplit(rev)[1:]
850 850 if self.module != new_module:
851 851 self.module = new_module
852 852 self.reparent(self.module)
853 853 io = StringIO()
854 854 info = svn.ra.get_file(self.ra, file, revnum, io)
855 855 data = io.getvalue()
856 856 # ra.get_file() seems to keep a reference on the input buffer,
857 857 # preventing collection. Release it explicitly.
858 858 io.close()
859 859 if isinstance(info, list):
860 860 info = info[-1]
861 861 mode = ("svn:executable" in info) and 'x' or ''
862 862 mode = ("svn:special" in info) and 'l' or mode
863 863 except SubversionException, e:
864 864 notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
865 865 svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
866 866 if e.apr_err in notfound: # File not found
867 867 raise IOError()
868 868 raise
869 869 if mode == 'l':
870 870 link_prefix = "link "
871 871 if data.startswith(link_prefix):
872 872 data = data[len(link_prefix):]
873 873 return data, mode
874 874
875 875 def _find_children(self, path, revnum):
876 876 path = path.strip('/')
877 877 pool = Pool()
878 878 rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
879 879 return ['%s/%s' % (path, x) for x in
880 880 svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool).keys()]
881 881
882 882 def getrelpath(self, path, module=None):
883 883 if module is None:
884 884 module = self.module
885 885 # Given the repository url of this wc, say
886 886 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
887 887 # extract the "entry" portion (a relative path) from what
888 888 # svn log --xml says, ie
889 889 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
890 890 # that is to say "tests/PloneTestCase.py"
891 891 if path.startswith(module):
892 892 relative = path.rstrip('/')[len(module):]
893 893 if relative.startswith('/'):
894 894 return relative[1:]
895 895 elif relative == '':
896 896 return relative
897 897
898 898 # The path is outside our tracked tree...
899 899 self.ui.debug(_('%r is not under %r, ignoring\n') % (path, module))
900 900 return None
901 901
902 902 def _checkpath(self, path, revnum):
903 903 # ra.check_path does not like leading slashes very much; they lead
904 904 # to PROPFIND errors from Subversion
905 905 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
906 906
907 907 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
908 908 strict_node_history=False):
909 909 # Normalize path names; svn >= 1.5 only wants paths relative to
910 910 # the supplied URL
911 911 relpaths = []
912 912 for p in paths:
913 913 if not p.startswith('/'):
914 914 p = self.module + '/' + p
915 915 relpaths.append(p.strip('/'))
916 916 args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
917 917 strict_node_history]
918 918 arg = encodeargs(args)
919 919 hgexe = util.hgexecutable()
920 920 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
921 921 stdin, stdout = util.popen2(cmd, 'b')
922 922 stdin.write(arg)
923 923 stdin.close()
924 924 return logstream(stdout)
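    # _getlog() runs the expensive svn log call in a child process: the
    # query arguments are serialized with encodeargs(), piped into a
    # spawned 'hg debugsvnlog', and the matching log entries stream back
    # on its stdout, where logstream() turns them into the
    # (paths, revnum, author, date, message) tuples consumed above.
    # Minimal consumer sketch (made-up revision bounds):
    #
    #   stream = self._getlog([self.module], 1200, 1000)
    #   try:
    #       for paths, revnum, author, date, message in stream:
    #           pass  # one changed revision per entry
    #   finally:
    #       stream.close()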
925 925
926 926 pre_revprop_change = '''#!/bin/sh
927 927
928 928 REPOS="$1"
929 929 REV="$2"
930 930 USER="$3"
931 931 PROPNAME="$4"
932 932 ACTION="$5"
933 933
934 934 if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
935 935 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
936 936 if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
937 937
938 938 echo "Changing prohibited revision property" >&2
939 939 exit 1
940 940 '''
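# Subversion runs this hook as
#   pre-revprop-change REPOS REV USER PROPNAME ACTION
# and rejects the revision property change if it exits non-zero. The
# version above therefore permits only edits to svn:log plus additions
# of the hg:convert-branch and hg:convert-rev markers that putcommit()
# sets below; everything else is refused.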
941 941
942 942 class svn_sink(converter_sink, commandline):
943 943 commit_re = re.compile(r'Committed revision (\d+)\.', re.M)
944 944
945 945 def prerun(self):
946 946 if self.wc:
947 947 os.chdir(self.wc)
948 948
949 949 def postrun(self):
950 950 if self.wc:
951 951 os.chdir(self.cwd)
952 952
953 953 def join(self, name):
954 954 return os.path.join(self.wc, '.svn', name)
955 955
956 956 def revmapfile(self):
957 957 return self.join('hg-shamap')
958 958
959 959 def authorfile(self):
960 960 return self.join('hg-authormap')
961 961
962 962 def __init__(self, ui, path):
963 963 converter_sink.__init__(self, ui, path)
964 964 commandline.__init__(self, ui, 'svn')
965 965 self.delete = []
966 966 self.setexec = []
967 967 self.delexec = []
968 968 self.copies = []
969 969 self.wc = None
970 970 self.cwd = os.getcwd()
971 971
972 972 path = os.path.realpath(path)
973 973
974 974 created = False
975 975 if os.path.isfile(os.path.join(path, '.svn', 'entries')):
976 976 self.wc = path
977 977 self.run0('update')
978 978 else:
979 979 wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
980 980
981 981 if os.path.isdir(os.path.dirname(path)):
982 982 if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
983 983 ui.status(_('initializing svn repo %r\n') %
984 984 os.path.basename(path))
985 985 commandline(ui, 'svnadmin').run0('create', path)
986 986 created = path
987 987 path = util.normpath(path)
988 988 if not path.startswith('/'):
989 989 path = '/' + path
990 990 path = 'file://' + path
991 991
992 992 ui.status(_('initializing svn wc %r\n') % os.path.basename(wcpath))
993 993 self.run0('checkout', path, wcpath)
994 994
995 995 self.wc = wcpath
996 996 self.opener = util.opener(self.wc)
997 997 self.wopener = util.opener(self.wc)
998 998 self.childmap = mapfile(ui, self.join('hg-childmap'))
999 999 self.is_exec = util.checkexec(self.wc) and util.is_exec or None
1000 1000
1001 1001 if created:
1002 1002 hook = os.path.join(created, 'hooks', 'pre-revprop-change')
1003 1003 fp = open(hook, 'w')
1004 1004 fp.write(pre_revprop_change)
1005 1005 fp.close()
1006 1006 util.set_flags(hook, False, True)
1007 1007
1008 1008 xport = transport.SvnRaTransport(url=geturl(path))
1009 1009 self.uuid = svn.ra.get_uuid(xport.ra)
1010 1010
1011 1011 def wjoin(self, *names):
1012 1012 return os.path.join(self.wc, *names)
1013 1013
1014 1014 def putfile(self, filename, flags, data):
1015 1015 if 'l' in flags:
1016 1016 self.wopener.symlink(data, filename)
1017 1017 else:
1018 1018 try:
1019 1019 if os.path.islink(self.wjoin(filename)):
1020 1020 os.unlink(filename)
1021 1021 except OSError:
1022 1022 pass
1023 1023 self.wopener(filename, 'w').write(data)
1024 1024
1025 1025 if self.is_exec:
1026 1026 was_exec = self.is_exec(self.wjoin(filename))
1027 1027 else:
1028 1028 # On filesystems not supporting the execute bit, there is no way
1029 1029 # to know whether it is set short of asking Subversion. Setting it
1030 1030 # systematically costs about the same and is much simpler.
1031 1031 was_exec = 'x' not in flags
1032 1032
1033 1033 util.set_flags(self.wjoin(filename), False, 'x' in flags)
1034 1034 if was_exec:
1035 1035 if 'x' not in flags:
1036 1036 self.delexec.append(filename)
1037 1037 else:
1038 1038 if 'x' in flags:
1039 1039 self.setexec.append(filename)
1040 1040
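    # A sketch of the bookkeeping above: suppose 'bin/tool' was
    # committed executable and a later revision drops the x bit. Then
    # putfile('bin/tool', '', data) sees was_exec true and 'x' not in
    # flags, so 'bin/tool' lands in self.delexec, which putcommit()
    # later flushes with 'svn propdel svn:executable'. The reverse
    # transition goes through self.setexec and 'svn propset
    # svn:executable *'. ('bin/tool' is a made-up example path.)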
1041 1041 def _copyfile(self, source, dest):
1042 1042 # SVN's copy command pukes if the destination file exists, but
1043 1043 # our copyfile method expects to record a copy that has
1044 1044 # already occurred. Cross the semantic gap.
1045 1045 wdest = self.wjoin(dest)
1046 1046 exists = os.path.exists(wdest)
1047 1047 if exists:
1048 1048 fd, tempname = tempfile.mkstemp(
1049 1049 prefix='hg-copy-', dir=os.path.dirname(wdest))
1050 1050 os.close(fd)
1051 1051 os.unlink(tempname)
1052 1052 os.rename(wdest, tempname)
1053 1053 try:
1054 1054 self.run0('copy', source, dest)
1055 1055 finally:
1056 1056 if exists:
1057 1057 try:
1058 1058 os.unlink(wdest)
1059 1059 except OSError:
1060 1060 pass
1061 1061 os.rename(tempname, wdest)
1062 1062
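    # In other words, when dest already exists _copyfile() performs
    # roughly this dance (sketch):
    #
    #   os.rename(wdest, tempname)    # stash the already-written copy
    #   svn copy source dest          # let svn record the copy
    #   os.unlink(wdest); os.rename(tempname, wdest)  # restore content
    #
    # so the copy metadata comes from svn while the file content stays
    # the one putfile() wrote.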
1063 1063 def dirs_of(self, files):
1064 1064 dirs = util.set()
1065 1065 for f in files:
1066 1066 if os.path.isdir(self.wjoin(f)):
1067 1067 dirs.add(f)
1068 1068 for i in strutil.rfindall(f, '/'):
1069 1069 dirs.add(f[:i])
1070 1070 return dirs
1071 1071
1072 1072 def add_dirs(self, files):
1073 1073 add_dirs = [d for d in util.sort(self.dirs_of(files))
1074 1074 if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
1075 1075 if add_dirs:
1076 1076 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
1077 1077 return add_dirs
1078 1078
1079 1079 def add_files(self, files):
1080 1080 if files:
1081 1081 self.xargs(files, 'add', quiet=True)
1082 1082 return files
1083 1083
1084 1084 def tidy_dirs(self, names):
1085 1085 dirs = util.sort(self.dirs_of(names))
1086 1086 dirs.reverse()
1087 1087 deleted = []
1088 1088 for d in dirs:
1089 1089 wd = self.wjoin(d)
1090 1090 if os.listdir(wd) == ['.svn']:
1091 1091 self.run0('delete', d)
1092 1092 deleted.append(d)
1093 1093 return deleted
1094 1094
1095 1095 def addchild(self, parent, child):
1096 1096 self.childmap[parent] = child
1097 1097
1098 1098 def revid(self, rev):
1099 1099 return u"svn:%s@%s" % (self.uuid, rev)
1100 1100
1101 1101 def putcommit(self, files, copies, parents, commit, source):
1102 1102 # Apply changes to working copy
1103 1103 for f, v in files:
1104 1104 try:
1105 1105 data = source.getfile(f, v)
1106 1106 except IOError, inst:
1107 1107 self.delete.append(f)
1108 1108 else:
1109 1109 e = source.getmode(f, v)
1110 1110 self.putfile(f, e, data)
1111 1111 if f in copies:
1112 1112 self.copies.append([copies[f], f])
1113 1113 files = [f[0] for f in files]
1114 1114
1115 1115 for parent in parents:
1116 1116 try:
1117 1117 return self.revid(self.childmap[parent])
1118 1118 except KeyError:
1119 1119 pass
1120 1120 entries = util.set(self.delete)
1121 1121 files = util.frozenset(files)
1122 1122 entries.update(self.add_dirs(files.difference(entries)))
1123 1123 if self.copies:
1124 1124 for s, d in self.copies:
1125 1125 self._copyfile(s, d)
1126 1126 self.copies = []
1127 1127 if self.delete:
1128 1128 self.xargs(self.delete, 'delete')
1129 1129 self.delete = []
1130 1130 entries.update(self.add_files(files.difference(entries)))
1131 1131 entries.update(self.tidy_dirs(entries))
1132 1132 if self.delexec:
1133 1133 self.xargs(self.delexec, 'propdel', 'svn:executable')
1134 1134 self.delexec = []
1135 1135 if self.setexec:
1136 1136 self.xargs(self.setexec, 'propset', 'svn:executable', '*')
1137 1137 self.setexec = []
1138 1138
1139 1139 fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
1140 1140 fp = os.fdopen(fd, 'w')
1141 1141 fp.write(commit.desc)
1142 1142 fp.close()
1143 1143 try:
1144 1144 output = self.run0('commit',
1145 1145 username=util.shortuser(commit.author),
1146 1146 file=messagefile,
1147 1147 encoding='utf-8')
1148 1148 try:
1149 1149 rev = self.commit_re.search(output).group(1)
1150 1150 except AttributeError:
1151 1151 self.ui.warn(_('unexpected svn output:\n'))
1152 1152 self.ui.warn(output)
1153 1153 raise util.Abort(_('unable to cope with svn output'))
1154 1154 if commit.rev:
1155 1155 self.run('propset', 'hg:convert-rev', commit.rev,
1156 1156 revprop=True, revision=rev)
1157 1157 if commit.branch and commit.branch != 'default':
1158 1158 self.run('propset', 'hg:convert-branch', commit.branch,
1159 1159 revprop=True, revision=rev)
1160 1160 for parent in parents:
1161 1161 self.addchild(parent, rev)
1162 1162 return self.revid(rev)
1163 1163 finally:
1164 1164 os.unlink(messagefile)
1165 1165
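    # The 'propset' calls with revprop=True above become
    # 'svn propset --revprop' invocations, which a stock repository
    # rejects unless a pre-revprop-change hook allows them; on
    # repositories this sink creates itself, __init__ installed exactly
    # such a hook (see pre_revprop_change above). The new revision
    # number is scraped from svn's commit output via commit_re.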
1166 1166 def puttags(self, tags):
1167 1167 self.ui.warn(_('XXX TAGS NOT IMPLEMENTED YET\n'))
@@ -1,2589 +1,2589 b''
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 '''patch management and development
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use "hg help command" for more details):
18 18
19 19 prepare repository to work with patches qinit
20 20 create new patch qnew
21 21 import existing patch qimport
22 22
23 23 print patch series qseries
24 24 print applied patches qapplied
25 25 print name of top applied patch qtop
26 26
27 27 add known patch to applied stack qpush
28 28 remove patch from applied stack qpop
29 29 refresh contents of top applied patch qrefresh
30 30 '''
31 31
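# An illustrative session tying the commands above together (a sketch,
# with made-up file and patch names):
#
#   $ hg qinit                  # prepare .hg/patches
#   $ hg qnew fix-encoding      # start a new patch at the top
#   $ echo fixed >> somefile.py
#   $ hg qrefresh               # fold working dir changes into it
#   $ hg qpop -a                # unapply everything
#   $ hg qpush                  # reapply the first patch in series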
32 32 from mercurial.i18n import _
33 33 from mercurial.node import bin, hex, short, nullid, nullrev
34 34 from mercurial import commands, cmdutil, hg, patch, util
35 35 from mercurial import repair, extensions, url, error
36 36 import os, sys, re, errno
37 37
38 38 commands.norepo += " qclone"
39 39
40 40 # Patch names look like unix file names.
41 41 # They must be joinable with the queue directory and result in the patch path.
42 42 normname = util.normpath
43 43
44 44 class statusentry:
45 45 def __init__(self, rev, name=None):
46 46 if not name:
47 47 fields = rev.split(':', 1)
48 48 if len(fields) == 2:
49 49 self.rev, self.name = fields
50 50 else:
51 51 self.rev, self.name = None, None
52 52 else:
53 53 self.rev, self.name = rev, name
54 54
55 55 def __str__(self):
56 56 return self.rev + ':' + self.name
57 57
58 58 class patchheader(object):
59 59 def __init__(self, message, comments, user, date, haspatch):
60 60 self.message = message
61 61 self.comments = comments
62 62 self.user = user
63 63 self.date = date
64 64 self.haspatch = haspatch
65 65
66 66 def setuser(self, user):
67 67 if not self.setheader(['From: ', '# User '], user):
68 68 try:
69 69 patchheaderat = self.comments.index('# HG changeset patch')
70 70 self.comments.insert(patchheaderat + 1,'# User ' + user)
71 71 except ValueError:
72 72 self.comments = ['From: ' + user, ''] + self.comments
73 73 self.user = user
74 74
75 75 def setdate(self, date):
76 76 if self.setheader(['# Date '], date):
77 77 self.date = date
78 78
79 79 def setmessage(self, message):
80 80 if self.comments:
81 81 self._delmsg()
82 82 self.message = [message]
83 83 self.comments += self.message
84 84
85 85 def setheader(self, prefixes, new):
86 86 '''Update all references to a field in the patch header.
87 87 If none found, add it email style.'''
88 88 res = False
89 89 for prefix in prefixes:
90 90 for i in xrange(len(self.comments)):
91 91 if self.comments[i].startswith(prefix):
92 92 self.comments[i] = prefix + new
93 93 res = True
94 94 break
95 95 return res
96 96
97 97 def __str__(self):
98 98 if not self.comments:
99 99 return ''
100 100 return '\n'.join(self.comments) + '\n\n'
101 101
102 102 def _delmsg(self):
103 103 '''Remove existing message, keeping the rest of the comments fields.
104 104 If comments contains 'subject: ', message will prepend
105 105 the field and a blank line.'''
106 106 if self.message:
107 107 subj = 'subject: ' + self.message[0].lower()
108 108 for i in xrange(len(self.comments)):
109 109 if subj == self.comments[i].lower():
110 110 del self.comments[i]
111 111 self.message = self.message[2:]
112 112 break
113 113 ci = 0
114 114 for mi in xrange(len(self.message)):
115 115 while self.message[mi] != self.comments[ci]:
116 116 ci += 1
117 117 del self.comments[ci]
118 118
119 119 class queue:
120 120 def __init__(self, ui, path, patchdir=None):
121 121 self.basepath = path
122 122 self.path = patchdir or os.path.join(path, "patches")
123 123 self.opener = util.opener(self.path)
124 124 self.ui = ui
125 125 self.applied = []
126 126 self.full_series = []
127 127 self.applied_dirty = 0
128 128 self.series_dirty = 0
129 129 self.series_path = "series"
130 130 self.status_path = "status"
131 131 self.guards_path = "guards"
132 132 self.active_guards = None
133 133 self.guards_dirty = False
134 134 self._diffopts = None
135 135
136 136 if os.path.exists(self.join(self.series_path)):
137 137 self.full_series = self.opener(self.series_path).read().splitlines()
138 138 self.parse_series()
139 139
140 140 if os.path.exists(self.join(self.status_path)):
141 141 lines = self.opener(self.status_path).read().splitlines()
142 142 self.applied = [statusentry(l) for l in lines]
143 143
144 144 def diffopts(self):
145 145 if self._diffopts is None:
146 146 self._diffopts = patch.diffopts(self.ui)
147 147 return self._diffopts
148 148
149 149 def join(self, *p):
150 150 return os.path.join(self.path, *p)
151 151
152 152 def find_series(self, patch):
153 153 pre = re.compile(r"(\s*)([^#]+)")
154 154 index = 0
155 155 for l in self.full_series:
156 156 m = pre.match(l)
157 157 if m:
158 158 s = m.group(2)
159 159 s = s.rstrip()
160 160 if s == patch:
161 161 return index
162 162 index += 1
163 163 return None
164 164
165 165 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
166 166
167 167 def parse_series(self):
168 168 self.series = []
169 169 self.series_guards = []
170 170 for l in self.full_series:
171 171 h = l.find('#')
172 172 if h == -1:
173 173 patch = l
174 174 comment = ''
175 175 elif h == 0:
176 176 continue
177 177 else:
178 178 patch = l[:h]
179 179 comment = l[h:]
180 180 patch = patch.strip()
181 181 if patch:
182 182 if patch in self.series:
183 183 raise util.Abort(_('%s appears more than once in %s') %
184 184 (patch, self.join(self.series_path)))
185 185 self.series.append(patch)
186 186 self.series_guards.append(self.guard_re.findall(comment))
187 187
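    # parse_series() accepts series lines like these (made-up names):
    #
    #   # a whole-line comment, skipped
    #   first.patch
    #   second.patch #+experimental #-stable
    #
    # yielding series == ['first.patch', 'second.patch'] and
    # series_guards == [[], ['+experimental', '-stable']], matching
    # the ' #'-prefixed form that set_guards() writes back.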
188 188 def check_guard(self, guard):
189 189 if not guard:
190 190 return _('guard cannot be an empty string')
191 191 bad_chars = '# \t\r\n\f'
192 192 first = guard[0]
193 193 for c in '-+':
194 194 if first == c:
195 195 return (_('guard %r starts with invalid character: %r') %
196 196 (guard, c))
197 197 for c in bad_chars:
198 198 if c in guard:
199 199 return _('invalid character in guard %r: %r') % (guard, c)
200 200
201 201 def set_active(self, guards):
202 202 for guard in guards:
203 203 bad = self.check_guard(guard)
204 204 if bad:
205 205 raise util.Abort(bad)
206 206 guards = util.sort(util.unique(guards))
207 207 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
208 208 self.active_guards = guards
209 209 self.guards_dirty = True
210 210
211 211 def active(self):
212 212 if self.active_guards is None:
213 213 self.active_guards = []
214 214 try:
215 215 guards = self.opener(self.guards_path).read().split()
216 216 except IOError, err:
217 217 if err.errno != errno.ENOENT: raise
218 218 guards = []
219 219 for i, guard in enumerate(guards):
220 220 bad = self.check_guard(guard)
221 221 if bad:
222 222 self.ui.warn('%s:%d: %s\n' %
223 223 (self.join(self.guards_path), i + 1, bad))
224 224 else:
225 225 self.active_guards.append(guard)
226 226 return self.active_guards
227 227
228 228 def set_guards(self, idx, guards):
229 229 for g in guards:
230 230 if len(g) < 2:
231 231 raise util.Abort(_('guard %r too short') % g)
232 232 if g[0] not in '-+':
233 233 raise util.Abort(_('guard %r starts with invalid char') % g)
234 234 bad = self.check_guard(g[1:])
235 235 if bad:
236 236 raise util.Abort(bad)
237 237 drop = self.guard_re.sub('', self.full_series[idx])
238 238 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
239 239 self.parse_series()
240 240 self.series_dirty = True
241 241
242 242 def pushable(self, idx):
243 243 if isinstance(idx, str):
244 244 idx = self.series.index(idx)
245 245 patchguards = self.series_guards[idx]
246 246 if not patchguards:
247 247 return True, None
248 248 guards = self.active()
249 249 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
250 250 if exactneg:
251 251 return False, exactneg[0]
252 252 pos = [g for g in patchguards if g[0] == '+']
253 253 exactpos = [g for g in pos if g[1:] in guards]
254 254 if pos:
255 255 if exactpos:
256 256 return True, exactpos[0]
257 257 return False, pos
258 258 return True, ''
259 259
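    # Guard semantics in pushable(), by example: with active guards
    # ['stable'], a patch guarded '#-stable' is blocked (exact negative
    # match), one guarded '#+stable' is allowed, one guarded only
    # '#+experimental' is blocked because none of its positive guards
    # is active, and an unguarded patch is always pushable.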
260 260 def explain_pushable(self, idx, all_patches=False):
261 261 write = all_patches and self.ui.write or self.ui.warn
262 262 if all_patches or self.ui.verbose:
263 263 if isinstance(idx, str):
264 264 idx = self.series.index(idx)
265 265 pushable, why = self.pushable(idx)
266 266 if all_patches and pushable:
267 267 if why is None:
268 268 write(_('allowing %s - no guards in effect\n') %
269 269 self.series[idx])
270 270 else:
271 271 if not why:
272 272 write(_('allowing %s - no matching negative guards\n') %
273 273 self.series[idx])
274 274 else:
275 275 write(_('allowing %s - guarded by %r\n') %
276 276 (self.series[idx], why))
277 277 if not pushable:
278 278 if why:
279 279 write(_('skipping %s - guarded by %r\n') %
280 280 (self.series[idx], why))
281 281 else:
282 282 write(_('skipping %s - no matching guards\n') %
283 283 self.series[idx])
284 284
285 285 def save_dirty(self):
286 286 def write_list(items, path):
287 287 fp = self.opener(path, 'w')
288 288 for i in items:
289 289 fp.write("%s\n" % i)
290 290 fp.close()
291 291 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
292 292 if self.series_dirty: write_list(self.full_series, self.series_path)
293 293 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
294 294
295 295 def readheaders(self, patch):
296 296 def eatdiff(lines):
297 297 while lines:
298 298 l = lines[-1]
299 299 if (l.startswith("diff -") or
300 300 l.startswith("Index:") or
301 301 l.startswith("===========")):
302 302 del lines[-1]
303 303 else:
304 304 break
305 305 def eatempty(lines):
306 306 while lines:
307 307 l = lines[-1]
308 308 if re.match(r'\s*$', l):
309 309 del lines[-1]
310 310 else:
311 311 break
312 312
313 313 pf = self.join(patch)
314 314 message = []
315 315 comments = []
316 316 user = None
317 317 date = None
318 318 format = None
319 319 subject = None
320 320 diffstart = 0
321 321
322 322 for line in file(pf):
323 323 line = line.rstrip()
324 324 if line.startswith('diff --git'):
325 325 diffstart = 2
326 326 break
327 327 if diffstart:
328 328 if line.startswith('+++ '):
329 329 diffstart = 2
330 330 break
331 331 if line.startswith("--- "):
332 332 diffstart = 1
333 333 continue
334 334 elif format == "hgpatch":
335 335 # parse values when importing the result of an hg export
336 336 if line.startswith("# User "):
337 337 user = line[7:]
338 338 elif line.startswith("# Date "):
339 339 date = line[7:]
340 340 elif not line.startswith("# ") and line:
341 341 message.append(line)
342 342 format = None
343 343 elif line == '# HG changeset patch':
344 344 format = "hgpatch"
345 345 elif (format != "tagdone" and (line.startswith("Subject: ") or
346 346 line.startswith("subject: "))):
347 347 subject = line[9:]
348 348 format = "tag"
349 349 elif (format != "tagdone" and (line.startswith("From: ") or
350 350 line.startswith("from: "))):
351 351 user = line[6:]
352 352 format = "tag"
353 353 elif format == "tag" and line == "":
354 354 # when looking for tags (subject: from: etc) they
355 355 # end once you find a blank line in the source
356 356 format = "tagdone"
357 357 elif message or line:
358 358 message.append(line)
359 359 comments.append(line)
360 360
361 361 eatdiff(message)
362 362 eatdiff(comments)
363 363 eatempty(message)
364 364 eatempty(comments)
365 365
366 366 # make sure message isn't empty
367 367 if format and format.startswith("tag") and subject:
368 368 message.insert(0, "")
369 369 message.insert(0, subject)
370 370 return patchheader(message, comments, user, date, diffstart > 1)
371 371
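    # readheaders() copes with two header styles. An hg-export style
    # patch starts like this (names and dates are made-up examples):
    #
    #   # HG changeset patch
    #   # User Jane Doe <jane@example.org>
    #   # Date 1234567890 0
    #   commit message here
    #
    # while a mail-style patch uses 'From:' and 'Subject:' lines ended
    # by the first blank line. Either way the diff itself is detected
    # by the 'diff --git' / '---' / '+++' markers above.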
372 372 def removeundo(self, repo):
373 373 undo = repo.sjoin('undo')
374 374 if not os.path.exists(undo):
375 375 return
376 376 try:
377 377 os.unlink(undo)
378 378 except OSError, inst:
379 379 self.ui.warn(_('error removing undo: %s\n') % str(inst))
380 380
381 381 def printdiff(self, repo, node1, node2=None, files=None,
382 382 fp=None, changes=None, opts={}):
383 383 m = cmdutil.match(repo, files, opts)
384 384 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
385 385 write = fp is None and repo.ui.write or fp.write
386 386 for chunk in chunks:
387 387 write(chunk)
388 388
389 389 def mergeone(self, repo, mergeq, head, patch, rev):
390 390 # first try just applying the patch
391 391 (err, n) = self.apply(repo, [ patch ], update_status=False,
392 392 strict=True, merge=rev)
393 393
394 394 if err == 0:
395 395 return (err, n)
396 396
397 397 if n is None:
398 398 raise util.Abort(_("apply failed for patch %s") % patch)
399 399
400 400 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
401 401
402 402 # apply failed, strip away that rev and merge.
403 403 hg.clean(repo, head)
404 404 self.strip(repo, n, update=False, backup='strip')
405 405
406 406 ctx = repo[rev]
407 407 ret = hg.merge(repo, rev)
408 408 if ret:
409 409 raise util.Abort(_("update returned %d") % ret)
410 410 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
411 411 if n is None:
412 412 raise util.Abort(_("repo commit failed"))
413 413 try:
414 414 ph = mergeq.readheaders(patch)
415 415 except:
416 416 raise util.Abort(_("unable to read %s") % patch)
417 417
418 418 patchf = self.opener(patch, "w")
419 419 comments = str(ph)
420 420 if comments:
421 421 patchf.write(comments)
422 422 self.printdiff(repo, head, n, fp=patchf)
423 423 patchf.close()
424 424 self.removeundo(repo)
425 425 return (0, n)
426 426
427 427 def qparents(self, repo, rev=None):
428 428 if rev is None:
429 429 (p1, p2) = repo.dirstate.parents()
430 430 if p2 == nullid:
431 431 return p1
432 432 if len(self.applied) == 0:
433 433 return None
434 434 return bin(self.applied[-1].rev)
435 435 pp = repo.changelog.parents(rev)
436 436 if pp[1] != nullid:
437 437 arevs = [ x.rev for x in self.applied ]
438 438 p0 = hex(pp[0])
439 439 p1 = hex(pp[1])
440 440 if p0 in arevs:
441 441 return pp[0]
442 442 if p1 in arevs:
443 443 return pp[1]
444 444 return pp[0]
445 445
446 446 def mergepatch(self, repo, mergeq, series):
447 447 if len(self.applied) == 0:
448 448 # each of the patches merged in will have two parents. This
449 449 # can confuse the qrefresh, qdiff, and strip code because it
450 450 # needs to know which parent is actually in the patch queue.
451 451 # So, we insert a merge marker with only one parent. This way
452 452 # the first patch in the queue is never a merge patch.
453 453 #
454 454 pname = ".hg.patches.merge.marker"
455 455 n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
456 456 self.removeundo(repo)
457 457 self.applied.append(statusentry(hex(n), pname))
458 458 self.applied_dirty = 1
459 459
460 460 head = self.qparents(repo)
461 461
462 462 for patch in series:
463 463 patch = mergeq.lookup(patch, strict=True)
464 464 if not patch:
465 465 self.ui.warn(_("patch %s does not exist\n") % patch)
466 466 return (1, None)
467 467 pushable, reason = self.pushable(patch)
468 468 if not pushable:
469 469 self.explain_pushable(patch, all_patches=True)
470 470 continue
471 471 info = mergeq.isapplied(patch)
472 472 if not info:
473 473 self.ui.warn(_("patch %s is not applied\n") % patch)
474 474 return (1, None)
475 475 rev = bin(info[1])
476 476 (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
477 477 if head:
478 478 self.applied.append(statusentry(hex(head), patch))
479 479 self.applied_dirty = 1
480 480 if err:
481 481 return (err, head)
482 482 self.save_dirty()
483 483 return (0, head)
484 484
485 485 def patch(self, repo, patchfile):
486 486 '''Apply patchfile to the working directory.
487 487 patchfile: file name of patch'''
488 488 files = {}
489 489 try:
490 490 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
491 491 files=files)
492 492 except Exception, inst:
493 493 self.ui.note(str(inst) + '\n')
494 494 if not self.ui.verbose:
495 495 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
496 496 return (False, files, False)
497 497
498 498 return (True, files, fuzz)
499 499
500 500 def apply(self, repo, series, list=False, update_status=True,
501 501 strict=False, patchdir=None, merge=None, all_files={}):
502 502 wlock = lock = tr = None
503 503 try:
504 504 wlock = repo.wlock()
505 505 lock = repo.lock()
506 506 tr = repo.transaction()
507 507 try:
508 508 ret = self._apply(repo, series, list, update_status,
509 509 strict, patchdir, merge, all_files=all_files)
510 510 tr.close()
511 511 self.save_dirty()
512 512 return ret
513 513 except:
514 514 try:
515 515 tr.abort()
516 516 finally:
517 517 repo.invalidate()
518 518 repo.dirstate.invalidate()
519 519 raise
520 520 finally:
521 521 del tr, lock, wlock
522 522 self.removeundo(repo)
523 523
524 524 def _apply(self, repo, series, list=False, update_status=True,
525 525 strict=False, patchdir=None, merge=None, all_files={}):
526 526 # TODO unify with commands.py
527 527 if not patchdir:
528 528 patchdir = self.path
529 529 err = 0
530 530 n = None
531 531 for patchname in series:
532 532 pushable, reason = self.pushable(patchname)
533 533 if not pushable:
534 534 self.explain_pushable(patchname, all_patches=True)
535 535 continue
536 536 self.ui.warn(_("applying %s\n") % patchname)
537 537 pf = os.path.join(patchdir, patchname)
538 538
539 539 try:
540 540 ph = self.readheaders(patchname)
541 541 except:
542 542 self.ui.warn(_("Unable to read %s\n") % patchname)
543 543 err = 1
544 544 break
545 545
546 546 message = ph.message
547 547 if not message:
548 548 message = _("imported patch %s\n") % patchname
549 549 else:
550 550 if list:
551 551 message.append(_("\nimported patch %s") % patchname)
552 552 message = '\n'.join(message)
553 553
554 554 if ph.haspatch:
555 555 (patcherr, files, fuzz) = self.patch(repo, pf)
556 556 all_files.update(files)
557 557 patcherr = not patcherr
558 558 else:
559 559 self.ui.warn(_("patch %s is empty\n") % patchname)
560 560 patcherr, files, fuzz = 0, [], 0
561 561
562 562 if merge and files:
563 563 # Mark as removed/merged and update dirstate parent info
564 564 removed = []
565 565 merged = []
566 566 for f in files:
567 567 if os.path.exists(repo.wjoin(f)):
568 568 merged.append(f)
569 569 else:
570 570 removed.append(f)
571 571 for f in removed:
572 572 repo.dirstate.remove(f)
573 573 for f in merged:
574 574 repo.dirstate.merge(f)
575 575 p1, p2 = repo.dirstate.parents()
576 576 repo.dirstate.setparents(p1, merge)
577 577
578 578 files = patch.updatedir(self.ui, repo, files)
579 579 match = cmdutil.matchfiles(repo, files or [])
580 580 n = repo.commit(files, message, ph.user, ph.date, match=match,
581 581 force=True)
582 582
583 583 if n is None:
584 584 raise util.Abort(_("repo commit failed"))
585 585
586 586 if update_status:
587 587 self.applied.append(statusentry(hex(n), patchname))
588 588
589 589 if patcherr:
590 590 self.ui.warn(_("patch failed, rejects left in working dir\n"))
591 591 err = 1
592 592 break
593 593
594 594 if fuzz and strict:
595 595 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
596 596 err = 1
597 597 break
598 598 return (err, n)
599 599
600 600 def _clean_series(self, patches):
601 601 indices = util.sort([self.find_series(p) for p in patches])
602 602 for i in indices[-1::-1]:
603 603 del self.full_series[i]
604 604 self.parse_series()
605 605 self.series_dirty = 1
606 606
607 607 def finish(self, repo, revs):
608 608 revs.sort()
609 609 firstrev = repo[self.applied[0].rev].rev()
610 610 appliedbase = 0
611 611 patches = []
612 612 for rev in util.sort(revs):
613 613 if rev < firstrev:
614 614 raise util.Abort(_('revision %d is not managed') % rev)
615 615 base = bin(self.applied[appliedbase].rev)
616 616 node = repo.changelog.node(rev)
617 617 if node != base:
618 618 raise util.Abort(_('cannot delete revision %d above '
619 619 'applied patches') % rev)
620 620 patches.append(self.applied[appliedbase].name)
621 621 appliedbase += 1
622 622
623 623 r = self.qrepo()
624 624 if r:
625 625 r.remove(patches, True)
626 626 else:
627 627 for p in patches:
628 628 os.unlink(self.join(p))
629 629
630 630 del self.applied[:appliedbase]
631 631 self.applied_dirty = 1
632 632 self._clean_series(patches)
633 633
634 634 def delete(self, repo, patches, opts):
635 635 if not patches and not opts.get('rev'):
636 636 raise util.Abort(_('qdelete requires at least one revision or '
637 637 'patch name'))
638 638
639 639 realpatches = []
640 640 for patch in patches:
641 641 patch = self.lookup(patch, strict=True)
642 642 info = self.isapplied(patch)
643 643 if info:
644 644 raise util.Abort(_("cannot delete applied patch %s") % patch)
645 645 if patch not in self.series:
646 646 raise util.Abort(_("patch %s not in series file") % patch)
647 647 realpatches.append(patch)
648 648
649 649 appliedbase = 0
650 650 if opts.get('rev'):
651 651 if not self.applied:
652 652 raise util.Abort(_('no patches applied'))
653 653 revs = cmdutil.revrange(repo, opts['rev'])
654 654 if len(revs) > 1 and revs[0] > revs[1]:
655 655 revs.reverse()
656 656 for rev in revs:
657 657 if appliedbase >= len(self.applied):
658 658 raise util.Abort(_("revision %d is not managed") % rev)
659 659
660 660 base = bin(self.applied[appliedbase].rev)
661 661 node = repo.changelog.node(rev)
662 662 if node != base:
663 663 raise util.Abort(_("cannot delete revision %d above "
664 664 "applied patches") % rev)
665 665 realpatches.append(self.applied[appliedbase].name)
666 666 appliedbase += 1
667 667
668 668 if not opts.get('keep'):
669 669 r = self.qrepo()
670 670 if r:
671 671 r.remove(realpatches, True)
672 672 else:
673 673 for p in realpatches:
674 674 os.unlink(self.join(p))
675 675
676 676 if appliedbase:
677 677 del self.applied[:appliedbase]
678 678 self.applied_dirty = 1
679 679 self._clean_series(realpatches)
680 680
681 681 def check_toppatch(self, repo):
682 682 if len(self.applied) > 0:
683 683 top = bin(self.applied[-1].rev)
684 684 pp = repo.dirstate.parents()
685 685 if top not in pp:
686 686 raise util.Abort(_("working directory revision is not qtip"))
687 687 return top
688 688 return None
689 689 def check_localchanges(self, repo, force=False, refresh=True):
690 690 m, a, r, d = repo.status()[:4]
691 691 if m or a or r or d:
692 692 if not force:
693 693 if refresh:
694 694 raise util.Abort(_("local changes found, refresh first"))
695 695 else:
696 696 raise util.Abort(_("local changes found"))
697 697 return m, a, r, d
698 698
699 699 _reserved = ('series', 'status', 'guards')
700 700 def check_reserved_name(self, name):
701 701 if (name in self._reserved or name.startswith('.hg')
702 702 or name.startswith('.mq')):
703 703 raise util.Abort(_('"%s" cannot be used as the name of a patch')
704 704 % name)
705 705
706 706 def new(self, repo, patchfn, *pats, **opts):
707 707 """options:
708 708 msg: a string or a no-argument function returning a string
709 709 """
710 710 msg = opts.get('msg')
711 711 force = opts.get('force')
712 712 user = opts.get('user')
713 713 date = opts.get('date')
714 714 if date:
715 715 date = util.parsedate(date)
716 716 self.check_reserved_name(patchfn)
717 717 if os.path.exists(self.join(patchfn)):
718 718 raise util.Abort(_('patch "%s" already exists') % patchfn)
719 719 if opts.get('include') or opts.get('exclude') or pats:
720 720 match = cmdutil.match(repo, pats, opts)
721 721 # detect missing files in pats
722 722 def badfn(f, msg):
723 723 raise util.Abort('%s: %s' % (f, msg))
724 724 match.bad = badfn
725 725 m, a, r, d = repo.status(match=match)[:4]
726 726 else:
727 727 m, a, r, d = self.check_localchanges(repo, force)
728 728 match = cmdutil.matchfiles(repo, m + a + r)
729 729 commitfiles = m + a + r
730 730 self.check_toppatch(repo)
731 731 insert = self.full_series_end()
732 732 wlock = repo.wlock()
733 733 try:
734 734 # if patch file write fails, abort early
735 735 p = self.opener(patchfn, "w")
736 736 try:
737 737 if date:
738 738 p.write("# HG changeset patch\n")
739 739 if user:
740 740 p.write("# User " + user + "\n")
741 741 p.write("# Date %d %d\n\n" % date)
742 742 elif user:
743 743 p.write("From: " + user + "\n\n")
744 744
745 745 if callable(msg):
746 746 msg = msg()
747 747 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
748 748 n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
749 749 if n is None:
750 750 raise util.Abort(_("repo commit failed"))
751 751 try:
752 752 self.full_series[insert:insert] = [patchfn]
753 753 self.applied.append(statusentry(hex(n), patchfn))
754 754 self.parse_series()
755 755 self.series_dirty = 1
756 756 self.applied_dirty = 1
757 757 if msg:
758 758 msg = msg + "\n\n"
759 759 p.write(msg)
760 760 if commitfiles:
761 761 diffopts = self.diffopts()
762 762 if opts.get('git'): diffopts.git = True
763 763 parent = self.qparents(repo, n)
764 764 chunks = patch.diff(repo, node1=parent, node2=n,
765 765 match=match, opts=diffopts)
766 766 for chunk in chunks:
767 767 p.write(chunk)
768 768 p.close()
769 769 wlock = None
770 770 r = self.qrepo()
771 771 if r: r.add([patchfn])
772 772 except:
773 773 repo.rollback()
774 774 raise
775 775 except Exception:
776 776 patchpath = self.join(patchfn)
777 777 try:
778 778 os.unlink(patchpath)
779 779 except:
780 780 self.ui.warn(_('error unlinking %s\n') % patchpath)
781 781 raise
782 782 self.removeundo(repo)
783 783 finally:
784 784 del wlock
785 785
786 786 def strip(self, repo, rev, update=True, backup="all", force=None):
787 787 wlock = lock = None
788 788 try:
789 789 wlock = repo.wlock()
790 790 lock = repo.lock()
791 791
792 792 if update:
793 793 self.check_localchanges(repo, force=force, refresh=False)
794 794 urev = self.qparents(repo, rev)
795 795 hg.clean(repo, urev)
796 796 repo.dirstate.write()
797 797
798 798 self.removeundo(repo)
799 799 repair.strip(self.ui, repo, rev, backup)
800 800 # strip may have unbundled a set of backed up revisions after
801 801 # the actual strip
802 802 self.removeundo(repo)
803 803 finally:
804 804 del lock, wlock
805 805
806 806 def isapplied(self, patch):
807 807 """returns (index, rev, patch)"""
808 808 for i in xrange(len(self.applied)):
809 809 a = self.applied[i]
810 810 if a.name == patch:
811 811 return (i, a.rev, a.name)
812 812 return None
813 813
814 814 # if the exact patch name does not exist, we try a few
815 815 # variations. If strict is passed, we try only #1
816 816 #
817 817 # 1) a number to indicate an offset in the series file
818 818 # 2) a unique substring of the patch name was given
819 819 # 3) patchname[-+]num to indicate an offset in the series file
820 820 def lookup(self, patch, strict=False):
821 821 patch = patch and str(patch)
822 822
823 823 def partial_name(s):
824 824 if s in self.series:
825 825 return s
826 826 matches = [x for x in self.series if s in x]
827 827 if len(matches) > 1:
828 828 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
829 829 for m in matches:
830 830 self.ui.warn(' %s\n' % m)
831 831 return None
832 832 if matches:
833 833 return matches[0]
834 834 if len(self.series) > 0 and len(self.applied) > 0:
835 835 if s == 'qtip':
836 836 return self.series[self.series_end(True)-1]
837 837 if s == 'qbase':
838 838 return self.series[0]
839 839 return None
840 840
841 841 if patch is None:
842 842 return None
843 843 if patch in self.series:
844 844 return patch
845 845
846 846 if not os.path.isfile(self.join(patch)):
847 847 try:
848 848 sno = int(patch)
849 849 except (ValueError, OverflowError):
850 850 pass
851 851 else:
852 852 if -len(self.series) <= sno < len(self.series):
853 853 return self.series[sno]
854 854
855 855 if not strict:
856 856 res = partial_name(patch)
857 857 if res:
858 858 return res
859 859 minus = patch.rfind('-')
860 860 if minus >= 0:
861 861 res = partial_name(patch[:minus])
862 862 if res:
863 863 i = self.series.index(res)
864 864 try:
865 865 off = int(patch[minus+1:] or 1)
866 866 except (ValueError, OverflowError):
867 867 pass
868 868 else:
869 869 if i - off >= 0:
870 870 return self.series[i - off]
871 871 plus = patch.rfind('+')
872 872 if plus >= 0:
873 873 res = partial_name(patch[:plus])
874 874 if res:
875 875 i = self.series.index(res)
876 876 try:
877 877 off = int(patch[plus+1:] or 1)
878 878 except (ValueError, OverflowError):
879 879 pass
880 880 else:
881 881 if i + off < len(self.series):
882 882 return self.series[i + off]
883 883 raise util.Abort(_("patch %s not in series") % patch)
884 884
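    # Examples of the name forms lookup() resolves (made-up series):
    # with series == ['a.patch', 'b.patch', 'c.patch'] and all three
    # applied, lookup('1') -> 'b.patch' (series index), lookup('qbase')
    # -> 'a.patch', lookup('qtip') -> 'c.patch', lookup('c.patch-2')
    # -> 'a.patch' and lookup('a.patch+1') -> 'b.patch' (offsets), and
    # any unambiguous substring such as 'b' -> 'b.patch'.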
885 885 def push(self, repo, patch=None, force=False, list=False,
886 886 mergeq=None, all=False):
887 887 wlock = repo.wlock()
888 888 if repo.dirstate.parents()[0] != repo.changelog.tip():
889 889 self.ui.status(_("(working directory not at tip)\n"))
890 890
891 891 if not self.series:
892 892 self.ui.warn(_('no patches in series\n'))
893 893 return 0
894 894
895 895 try:
896 896 patch = self.lookup(patch)
897 897 # Suppose our series file is: A B C and the current 'top'
898 898 # patch is B. qpush C should be performed (moving forward),
899 899 # qpush B is a NOP (no change), and qpush A is an error (you
900 900 # can't go backwards with qpush)
901 901 if patch:
902 902 info = self.isapplied(patch)
903 903 if info:
904 904 if info[0] < len(self.applied) - 1:
905 905 raise util.Abort(
906 906 _("cannot push to a previous patch: %s") % patch)
907 907 self.ui.warn(
908 908 _('qpush: %s is already at the top\n') % patch)
909 909 return
910 910 pushable, reason = self.pushable(patch)
911 911 if not pushable:
912 912 if reason:
913 913 reason = _('guarded by %r') % reason
914 914 else:
915 915 reason = _('no matching guards')
916 916 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
917 917 return 1
918 918 elif all:
919 919 patch = self.series[-1]
920 920 if self.isapplied(patch):
921 921 self.ui.warn(_('all patches are currently applied\n'))
922 922 return 0
923 923
924 924 # Following the above example, starting at 'top' of B:
925 925 # qpush should be performed (pushes C), but a subsequent
926 926 # qpush without an argument is an error (nothing to
927 927 # apply). This allows a loop of "...while hg qpush..." to
928 928 # work as it detects an error when done
929 929 start = self.series_end()
930 930 if start == len(self.series):
931 931 self.ui.warn(_('patch series already fully applied\n'))
932 932 return 1
933 933 if not force:
934 934 self.check_localchanges(repo)
935 935
936 936 self.applied_dirty = 1
937 937 if start > 0:
938 938 self.check_toppatch(repo)
939 939 if not patch:
940 940 patch = self.series[start]
941 941 end = start + 1
942 942 else:
943 943 end = self.series.index(patch, start) + 1
944 944 s = self.series[start:end]
945 945 all_files = {}
946 946 try:
947 947 if mergeq:
948 948 ret = self.mergepatch(repo, mergeq, s)
949 949 else:
950 950 ret = self.apply(repo, s, list, all_files=all_files)
951 951 except:
952 952 self.ui.warn(_('cleaning up working directory...'))
953 953 node = repo.dirstate.parents()[0]
954 954 hg.revert(repo, node, None)
955 955 unknown = repo.status(unknown=True)[4]
956 956 # only remove unknown files that we know we touched or
957 957 # created while patching
958 958 for f in unknown:
959 959 if f in all_files:
960 960 util.unlink(repo.wjoin(f))
961 961 self.ui.warn(_('done\n'))
962 962 raise
963 963 top = self.applied[-1].name
964 964 if ret[0]:
965 965 self.ui.write(_("errors during apply, please fix and "
966 966 "refresh %s\n") % top)
967 967 else:
968 968 self.ui.write(_("now at: %s\n") % top)
969 969 return ret[0]
970 970 finally:
971 971 del wlock
972 972
973 973 def pop(self, repo, patch=None, force=False, update=True, all=False):
974 974 def getfile(f, rev, flags):
975 975 t = repo.file(f).read(rev)
976 976 repo.wwrite(f, t, flags)
977 977
978 978 wlock = repo.wlock()
979 979 try:
980 980 if patch:
981 981 # index, rev, patch
982 982 info = self.isapplied(patch)
983 983 if not info:
984 984 patch = self.lookup(patch)
985 985 info = self.isapplied(patch)
986 986 if not info:
987 987 raise util.Abort(_("patch %s is not applied") % patch)
988 988
989 989 if len(self.applied) == 0:
990 990 # Allow qpop -a to work repeatedly,
991 991 # but not qpop without an argument
992 992 self.ui.warn(_("no patches applied\n"))
993 993 return not all
994 994
995 995 if all:
996 996 start = 0
997 997 elif patch:
998 998 start = info[0] + 1
999 999 else:
1000 1000 start = len(self.applied) - 1
1001 1001
1002 1002 if start >= len(self.applied):
1003 1003 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1004 1004 return
1005 1005
1006 1006 if not update:
1007 1007 parents = repo.dirstate.parents()
1008 1008 rr = [ bin(x.rev) for x in self.applied ]
1009 1009 for p in parents:
1010 1010 if p in rr:
1011 1011 self.ui.warn(_("qpop: forcing dirstate update\n"))
1012 1012 update = True
1013 1013 else:
1014 1014 parents = [p.hex() for p in repo[None].parents()]
1015 1015 needupdate = False
1016 1016 for entry in self.applied[start:]:
1017 1017 if entry.rev in parents:
1018 1018 needupdate = True
1019 1019 break
1020 1020 update = needupdate
1021 1021
1022 1022 if not force and update:
1023 1023 self.check_localchanges(repo)
1024 1024
1025 1025 self.applied_dirty = 1
1026 1026 end = len(self.applied)
1027 1027 rev = bin(self.applied[start].rev)
1028 1028 if update:
1029 1029 top = self.check_toppatch(repo)
1030 1030
1031 1031 try:
1032 1032 heads = repo.changelog.heads(rev)
1033 1033 except error.LookupError:
1034 1034 node = short(rev)
1035 1035 raise util.Abort(_('trying to pop unknown node %s') % node)
1036 1036
1037 1037 if heads != [bin(self.applied[-1].rev)]:
1038 1038 raise util.Abort(_("popping would remove a revision not "
1039 1039 "managed by this patch queue"))
1040 1040
1041 1041 # we know there are no local changes, so we can make a simplified
1042 1042 # form of hg.update.
1043 1043 if update:
1044 1044 qp = self.qparents(repo, rev)
1045 1045 changes = repo.changelog.read(qp)
1046 1046 mmap = repo.manifest.read(changes[0])
1047 1047 m, a, r, d = repo.status(qp, top)[:4]
1048 1048 if d:
1049 1049 raise util.Abort(_("deletions found between repo revs"))
1050 1050 for f in m:
1051 1051 getfile(f, mmap[f], mmap.flags(f))
1052 1052 for f in r:
1053 1053 getfile(f, mmap[f], mmap.flags(f))
1054 1054 for f in m + r:
1055 1055 repo.dirstate.normal(f)
1056 1056 for f in a:
1057 1057 try:
1058 1058 os.unlink(repo.wjoin(f))
1059 1059 except OSError, e:
1060 1060 if e.errno != errno.ENOENT:
1061 1061 raise
1062 1062 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
1063 1063 except: pass
1064 1064 repo.dirstate.forget(f)
1065 1065 repo.dirstate.setparents(qp, nullid)
1066 1066 del self.applied[start:end]
1067 1067 self.strip(repo, rev, update=False, backup='strip')
1068 1068 if len(self.applied):
1069 1069 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1070 1070 else:
1071 1071 self.ui.write(_("patch queue now empty\n"))
1072 1072 finally:
1073 1073 del wlock
1074 1074
1075 1075 def diff(self, repo, pats, opts):
1076 1076 top = self.check_toppatch(repo)
1077 1077 if not top:
1078 1078 self.ui.write(_("no patches applied\n"))
1079 1079 return
1080 1080 qp = self.qparents(repo, top)
1081 1081 self._diffopts = patch.diffopts(self.ui, opts)
1082 1082 self.printdiff(repo, qp, files=pats, opts=opts)
1083 1083
1084 1084 def refresh(self, repo, pats=None, **opts):
1085 1085 if len(self.applied) == 0:
1086 1086 self.ui.write(_("no patches applied\n"))
1087 1087 return 1
1088 1088 msg = opts.get('msg', '').rstrip()
1089 1089 newuser = opts.get('user')
1090 1090 newdate = opts.get('date')
1091 1091 if newdate:
1092 1092 newdate = '%d %d' % util.parsedate(newdate)
1093 1093 wlock = repo.wlock()
1094 1094 try:
1095 1095 self.check_toppatch(repo)
1096 1096 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1097 1097 top = bin(top)
1098 1098 if repo.changelog.heads(top) != [top]:
1099 1099 raise util.Abort(_("cannot refresh a revision with children"))
1100 1100 cparents = repo.changelog.parents(top)
1101 1101 patchparent = self.qparents(repo, top)
1102 1102 ph = self.readheaders(patchfn)
1103 1103
1104 1104 patchf = self.opener(patchfn, 'r')
1105 1105
1106 1106 # if the patch was a git patch, refresh it as a git patch
1107 1107 for line in patchf:
1108 1108 if line.startswith('diff --git'):
1109 1109 self.diffopts().git = True
1110 1110 break
1111 1111
1112 1112 if msg:
1113 1113 ph.setmessage(msg)
1114 1114 if newuser:
1115 1115 ph.setuser(newuser)
1116 1116 if newdate:
1117 1117 ph.setdate(newdate)
1118 1118
1119 1119 # only commit new patch when write is complete
1120 1120 patchf = self.opener(patchfn, 'w', atomictemp=True)
1121 1121
1122 1122 patchf.seek(0)
1123 1123 patchf.truncate()
1124 1124
1125 1125 comments = str(ph)
1126 1126 if comments:
1127 1127 patchf.write(comments)
1128 1128
1129 1129 if opts.get('git'):
1130 1130 self.diffopts().git = True
1131 1131 tip = repo.changelog.tip()
1132 1132 if top == tip:
1133 1133 # if the top of our patch queue is also the tip, there is an
1134 1134 # optimization here. We update the dirstate in place and strip
1135 1135 # off the tip commit. Then just commit the current directory
1136 1136 # tree. We can also send repo.commit the list of files
1137 1137 # changed to speed up the diff
1138 1138 #
1139 1139 # in short mode, we only diff the files included in the
1140 1140 # patch already plus specified files
1141 1141 #
1142 1142 # this should really read:
1143 1143 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1144 1144 # but we do it backwards to take advantage of manifest/chlog
1145 1145 # caching against the next repo.status call
1146 1146 #
1147 1147 mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
1148 1148 changes = repo.changelog.read(tip)
1149 1149 man = repo.manifest.read(changes[0])
1150 1150 aaa = aa[:]
1151 1151 matchfn = cmdutil.match(repo, pats, opts)
1152 1152 if opts.get('short'):
1153 1153 # if amending a patch, we start with existing
1154 1154 # files plus specified files - unfiltered
1155 1155 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1156 1156 # filter with include/exclude options
1157 1157 matchfn = cmdutil.match(repo, opts=opts)
1158 1158 else:
1159 1159 match = cmdutil.matchall(repo)
1160 1160 m, a, r, d = repo.status(match=match)[:4]
1161 1161
1162 1162 # we might end up with files that were added between
1163 1163 # tip and the dirstate parent, but then changed in the
1164 1164 # local dirstate. In this case, we want them to only
1165 1165 # show up in the added section
1166 1166 for x in m:
1167 1167 if x not in aa:
1168 1168 mm.append(x)
1169 1169 # we might end up with files added by the local dirstate that
1170 1170 # were deleted by the patch. In this case, they should only
1171 1171 # show up in the changed section.
1172 1172 for x in a:
1173 1173 if x in dd:
1174 1174 del dd[dd.index(x)]
1175 1175 mm.append(x)
1176 1176 else:
1177 1177 aa.append(x)
1178 1178 # make sure any files deleted in the local dirstate
1179 1179 # are not in the add or change column of the patch
1180 1180 forget = []
1181 1181 for x in d + r:
1182 1182 if x in aa:
1183 1183 del aa[aa.index(x)]
1184 1184 forget.append(x)
1185 1185 continue
1186 1186 elif x in mm:
1187 1187 del mm[mm.index(x)]
1188 1188 dd.append(x)
1189 1189
1190 1190 m = util.unique(mm)
1191 1191 r = util.unique(dd)
1192 1192 a = util.unique(aa)
1193 1193 c = [filter(matchfn, l) for l in (m, a, r)]
1194 1194 match = cmdutil.matchfiles(repo, util.unique(c[0] + c[1] + c[2]))
1195 1195 chunks = patch.diff(repo, patchparent, match=match,
1196 1196 changes=c, opts=self.diffopts())
1197 1197 for chunk in chunks:
1198 1198 patchf.write(chunk)
1199 1199
1200 1200 try:
1201 1201 if self.diffopts().git:
1202 1202 copies = {}
1203 1203 for dst in a:
1204 1204 src = repo.dirstate.copied(dst)
1205 1205 # during qfold, the source file for copies may
1206 1206 # be removed. Treat this as a simple add.
1207 1207 if src is not None and src in repo.dirstate:
1208 1208 copies.setdefault(src, []).append(dst)
1209 1209 repo.dirstate.add(dst)
1210 1210 # remember the copies between patchparent and tip
1211 1211 for dst in aaa:
1212 1212 f = repo.file(dst)
1213 1213 src = f.renamed(man[dst])
1214 1214 if src:
1215 1215 copies.setdefault(src[0], []).extend(copies.get(dst, []))
1216 1216 if dst in a:
1217 1217 copies[src[0]].append(dst)
1218 1218 # we can't copy a file created by the patch itself
1219 1219 if dst in copies:
1220 1220 del copies[dst]
1221 1221 for src, dsts in copies.iteritems():
1222 1222 for dst in dsts:
1223 1223 repo.dirstate.copy(src, dst)
1224 1224 else:
1225 1225 for dst in a:
1226 1226 repo.dirstate.add(dst)
1227 1227 # Drop useless copy information
1228 1228 for f in list(repo.dirstate.copies()):
1229 1229 repo.dirstate.copy(None, f)
1230 1230 for f in r:
1231 1231 repo.dirstate.remove(f)
1232 1232 # if the patch excludes a modified file, mark that
1233 1233 # file with mtime=0 so status can see it.
1234 1234 mm = []
1235 1235 for i in xrange(len(m)-1, -1, -1):
1236 1236 if not matchfn(m[i]):
1237 1237 mm.append(m[i])
1238 1238 del m[i]
1239 1239 for f in m:
1240 1240 repo.dirstate.normal(f)
1241 1241 for f in mm:
1242 1242 repo.dirstate.normallookup(f)
1243 1243 for f in forget:
1244 1244 repo.dirstate.forget(f)
1245 1245
1246 1246 if not msg:
1247 1247 if not ph.message:
1248 1248 message = "[mq]: %s\n" % patchfn
1249 1249 else:
1250 1250 message = "\n".join(ph.message)
1251 1251 else:
1252 1252 message = msg
1253 1253
1254 1254 user = ph.user or changes[1]
1255 1255
1256 1256 # assumes strip can roll itself back if interrupted
1257 1257 repo.dirstate.setparents(*cparents)
1258 1258 self.applied.pop()
1259 1259 self.applied_dirty = 1
1260 1260 self.strip(repo, top, update=False,
1261 1261 backup='strip')
1262 1262 except:
1263 1263 repo.dirstate.invalidate()
1264 1264 raise
1265 1265
1266 1266 try:
1267 1267 # might be nice to attempt to roll back strip after this
1268 1268 patchf.rename()
1269 1269 n = repo.commit(match.files(), message, user, ph.date,
1270 1270 match=match, force=1)
1271 1271 self.applied.append(statusentry(hex(n), patchfn))
1272 1272 except:
1273 1273 ctx = repo[cparents[0]]
1274 1274 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1275 1275 self.save_dirty()
1276 1276 self.ui.warn(_('refresh interrupted while patch was popped! '
1277 1277 '(revert --all, qpush to recover)\n'))
1278 1278 raise
1279 1279 else:
1280 1280 self.printdiff(repo, patchparent, fp=patchf)
1281 1281 patchf.rename()
1282 1282 added = repo.status()[1]
1283 1283 for a in added:
1284 1284 f = repo.wjoin(a)
1285 1285 try:
1286 1286 os.unlink(f)
1287 1287 except OSError, e:
1288 1288 if e.errno != errno.ENOENT:
1289 1289 raise
1290 1290 try: os.removedirs(os.path.dirname(f))
1291 1291 except: pass
1292 1292 # forget the file copies in the dirstate
1293 1293 # push should re-add the files later on
1294 1294 repo.dirstate.forget(a)
1295 1295 self.pop(repo, force=True)
1296 1296 self.push(repo, force=True)
1297 1297 finally:
1298 1298 del wlock
1299 1299 self.removeundo(repo)
1300 1300
1301 1301 def init(self, repo, create=False):
1302 1302 if not create and os.path.isdir(self.path):
1303 1303 raise util.Abort(_("patch queue directory already exists"))
1304 1304 try:
1305 1305 os.mkdir(self.path)
1306 1306 except OSError, inst:
1307 1307 if inst.errno != errno.EEXIST or not create:
1308 1308 raise
1309 1309 if create:
1310 1310 return self.qrepo(create=True)
1311 1311
1312 1312 def unapplied(self, repo, patch=None):
1313 1313 if patch and patch not in self.series:
1314 1314 raise util.Abort(_("patch %s is not in series file") % patch)
1315 1315 if not patch:
1316 1316 start = self.series_end()
1317 1317 else:
1318 1318 start = self.series.index(patch) + 1
1319 1319 unapplied = []
1320 1320 for i in xrange(start, len(self.series)):
1321 1321 pushable, reason = self.pushable(i)
1322 1322 if pushable:
1323 1323 unapplied.append((i, self.series[i]))
1324 1324 self.explain_pushable(i)
1325 1325 return unapplied
1326 1326
1327 1327 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1328 1328 summary=False):
1329 1329 def displayname(patchname):
1330 1330 if summary:
1331 1331 ph = self.readheaders(patchname)
1332 1332 msg = ph.message
1333 1333 msg = msg and ': ' + msg[0] or ': '
1334 1334 else:
1335 1335 msg = ''
1336 1336 return '%s%s' % (patchname, msg)
1337 1337
1338 1338 applied = dict.fromkeys([p.name for p in self.applied])
1339 1339 if length is None:
1340 1340 length = len(self.series) - start
1341 1341 if not missing:
1342 1342 for i in xrange(start, start+length):
1343 1343 patch = self.series[i]
1344 1344 if patch in applied:
1345 1345 stat = 'A'
1346 1346 elif self.pushable(i)[0]:
1347 1347 stat = 'U'
1348 1348 else:
1349 1349 stat = 'G'
1350 1350 pfx = ''
1351 1351 if self.ui.verbose:
1352 1352 pfx = '%d %s ' % (i, stat)
1353 1353 elif status and status != stat:
1354 1354 continue
1355 1355 self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1356 1356 else:
1357 1357 msng_list = []
1358 1358 for root, dirs, files in os.walk(self.path):
1359 1359 d = root[len(self.path) + 1:]
1360 1360 for f in files:
1361 1361 fl = os.path.join(d, f)
1362 1362 if (fl not in self.series and
1363 1363 fl not in (self.status_path, self.series_path,
1364 1364 self.guards_path)
1365 1365 and not fl.startswith('.')):
1366 1366 msng_list.append(fl)
1367 1367 for x in util.sort(msng_list):
1368 1368 pfx = self.ui.verbose and ('D ') or ''
1369 1369 self.ui.write("%s%s\n" % (pfx, displayname(x)))
1370 1370
1371 1371 def issaveline(self, l):
1372 1372 if l.name == '.hg.patches.save.line':
1373 1373 return True
1374 1374
1375 1375 def qrepo(self, create=False):
1376 1376 if create or os.path.isdir(self.join(".hg")):
1377 1377 return hg.repository(self.ui, path=self.path, create=create)
1378 1378
1379 1379 def restore(self, repo, rev, delete=None, qupdate=None):
1380 1380 c = repo.changelog.read(rev)
1381 1381 desc = c[4].strip()
1382 1382 lines = desc.splitlines()
1383 1383 i = 0
1384 1384 datastart = None
1385 1385 series = []
1386 1386 applied = []
1387 1387 qpp = None
1388 1388 for i in xrange(0, len(lines)):
1389 1389 if lines[i] == 'Patch Data:':
1390 1390 datastart = i + 1
1391 1391 elif lines[i].startswith('Dirstate:'):
1392 1392 l = lines[i].rstrip()
1393 1393 l = l[10:].split(' ')
1394 1394 qpp = [ bin(x) for x in l ]
1395 1395 elif datastart is not None:
1396 1396 l = lines[i].rstrip()
1397 1397 se = statusentry(l)
1398 1398 file_ = se.name
1399 1399 if se.rev:
1400 1400 applied.append(se)
1401 1401 else:
1402 1402 series.append(file_)
1403 1403 if datastart is None:
1404 1404 self.ui.warn(_("No saved patch data found\n"))
1405 1405 return 1
1406 1406 self.ui.warn(_("restoring status: %s\n") % lines[0])
1407 1407 self.full_series = series
1408 1408 self.applied = applied
1409 1409 self.parse_series()
1410 1410 self.series_dirty = 1
1411 1411 self.applied_dirty = 1
1412 1412 heads = repo.changelog.heads()
1413 1413 if delete:
1414 1414 if rev not in heads:
1415 1415 self.ui.warn(_("save entry has children, leaving it alone\n"))
1416 1416 else:
1417 1417 self.ui.warn(_("removing save entry %s\n") % short(rev))
1418 1418 pp = repo.dirstate.parents()
1419 1419 if rev in pp:
1420 1420 update = True
1421 1421 else:
1422 1422 update = False
1423 1423 self.strip(repo, rev, update=update, backup='strip')
1424 1424 if qpp:
1425 1425 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1426 1426 (short(qpp[0]), short(qpp[1])))
1427 1427 if qupdate:
1428 1428             self.ui.status(_("updating queue directory\n"))
1429 1429 r = self.qrepo()
1430 1430 if not r:
1431 1431 self.ui.warn(_("Unable to load queue repository\n"))
1432 1432 return 1
1433 1433 hg.clean(r, qpp[0])
1434 1434
1435 1435 def save(self, repo, msg=None):
1436 1436 if len(self.applied) == 0:
1437 1437 self.ui.warn(_("save: no patches applied, exiting\n"))
1438 1438 return 1
1439 1439 if self.issaveline(self.applied[-1]):
1440 1440 self.ui.warn(_("status is already saved\n"))
1441 1441 return 1
1442 1442
1443 1443 ar = [ ':' + x for x in self.full_series ]
1444 1444 if not msg:
1445 1445 msg = _("hg patches saved state")
1446 1446 else:
1447 1447 msg = "hg patches: " + msg.rstrip('\r\n')
1448 1448 r = self.qrepo()
1449 1449 if r:
1450 1450 pp = r.dirstate.parents()
1451 1451 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1452 1452 msg += "\n\nPatch Data:\n"
1453 1453 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1454 1454 "\n".join(ar) + '\n' or "")
1455 1455 n = repo.commit(None, text, user=None, force=1)
1456 1456 if not n:
1457 1457 self.ui.warn(_("repo commit failed\n"))
1458 1458 return 1
1459 1459 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1460 1460 self.applied_dirty = 1
1461 1461 self.removeundo(repo)
1462 1462
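# A sketch of the state description that save() writes and restore()
# parses back, assuming two applied patches (hashes shortened and purely
# illustrative; each statusentry renders as "rev:name", and unapplied
# series lines carry a leading ':'):
#
#   hg patches saved state
#   Dirstate: 9f8e7d6c... 0000000000...
#
#   Patch Data:
#   a3c2b1e0...:first.patch
#   c5d4e3f2...:second.patch
#   :first.patch
#   :second.patch
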
1463 1463 def full_series_end(self):
1464 1464 if len(self.applied) > 0:
1465 1465 p = self.applied[-1].name
1466 1466 end = self.find_series(p)
1467 1467             if end is None:
1468 1468 return len(self.full_series)
1469 1469 return end + 1
1470 1470 return 0
1471 1471
1472 1472 def series_end(self, all_patches=False):
1473 1473 """If all_patches is False, return the index of the next pushable patch
1474 1474 in the series, or the series length. If all_patches is True, return the
1475 1475 index of the first patch past the last applied one.
1476 1476 """
1477 1477 end = 0
1478 1478 def next(start):
1479 1479 if all_patches:
1480 1480 return start
1481 1481 i = start
1482 1482 while i < len(self.series):
1483 1483 p, reason = self.pushable(i)
1484 1484 if p:
1485 1485 break
1486 1486 self.explain_pushable(i)
1487 1487 i += 1
1488 1488 return i
1489 1489 if len(self.applied) > 0:
1490 1490 p = self.applied[-1].name
1491 1491 try:
1492 1492 end = self.series.index(p)
1493 1493 except ValueError:
1494 1494 return 0
1495 1495 return next(end + 1)
1496 1496 return next(end)
1497 1497
1498 1498 def appliedname(self, index):
1499 1499 pname = self.applied[index].name
1500 1500 if not self.ui.verbose:
1501 1501 p = pname
1502 1502 else:
1503 1503 p = str(self.series.index(pname)) + " " + pname
1504 1504 return p
1505 1505
1506 1506 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1507 1507 force=None, git=False):
1508 1508 def checkseries(patchname):
1509 1509 if patchname in self.series:
1510 1510 raise util.Abort(_('patch %s is already in the series file')
1511 1511 % patchname)
1512 1512 def checkfile(patchname):
1513 1513 if not force and os.path.exists(self.join(patchname)):
1514 1514 raise util.Abort(_('patch "%s" already exists')
1515 1515 % patchname)
1516 1516
1517 1517 if rev:
1518 1518 if files:
1519 1519 raise util.Abort(_('option "-r" not valid when importing '
1520 1520 'files'))
1521 1521 rev = cmdutil.revrange(repo, rev)
1522 1522 rev.sort(lambda x, y: cmp(y, x))
1523 1523 if (len(files) > 1 or len(rev) > 1) and patchname:
1524 1524 raise util.Abort(_('option "-n" not valid when importing multiple '
1525 1525 'patches'))
1526 1526 i = 0
1527 1527 added = []
1528 1528 if rev:
1529 1529 # If mq patches are applied, we can only import revisions
1530 1530 # that form a linear path to qbase.
1531 1531 # Otherwise, they should form a linear path to a head.
1532 1532 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1533 1533 if len(heads) > 1:
1534 1534 raise util.Abort(_('revision %d is the root of more than one '
1535 1535 'branch') % rev[-1])
1536 1536 if self.applied:
1537 1537 base = hex(repo.changelog.node(rev[0]))
1538 1538 if base in [n.rev for n in self.applied]:
1539 1539 raise util.Abort(_('revision %d is already managed')
1540 1540 % rev[0])
1541 1541 if heads != [bin(self.applied[-1].rev)]:
1542 1542 raise util.Abort(_('revision %d is not the parent of '
1543 1543 'the queue') % rev[0])
1544 1544 base = repo.changelog.rev(bin(self.applied[0].rev))
1545 1545 lastparent = repo.changelog.parentrevs(base)[0]
1546 1546 else:
1547 1547 if heads != [repo.changelog.node(rev[0])]:
1548 1548 raise util.Abort(_('revision %d has unmanaged children')
1549 1549 % rev[0])
1550 1550 lastparent = None
1551 1551
1552 1552 if git:
1553 1553 self.diffopts().git = True
1554 1554
1555 1555 for r in rev:
1556 1556 p1, p2 = repo.changelog.parentrevs(r)
1557 1557 n = repo.changelog.node(r)
1558 1558 if p2 != nullrev:
1559 1559 raise util.Abort(_('cannot import merge revision %d') % r)
1560 1560 if lastparent and lastparent != r:
1561 1561 raise util.Abort(_('revision %d is not the parent of %d')
1562 1562 % (r, lastparent))
1563 1563 lastparent = p1
1564 1564
1565 1565 if not patchname:
1566 1566 patchname = normname('%d.diff' % r)
1567 1567 self.check_reserved_name(patchname)
1568 1568 checkseries(patchname)
1569 1569 checkfile(patchname)
1570 1570 self.full_series.insert(0, patchname)
1571 1571
1572 1572 patchf = self.opener(patchname, "w")
1573 1573 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1574 1574 patchf.close()
1575 1575
1576 1576 se = statusentry(hex(n), patchname)
1577 1577 self.applied.insert(0, se)
1578 1578
1579 1579 added.append(patchname)
1580 1580 patchname = None
1581 1581 self.parse_series()
1582 1582 self.applied_dirty = 1
1583 1583
1584 1584 for filename in files:
1585 1585 if existing:
1586 1586 if filename == '-':
1587 1587 raise util.Abort(_('-e is incompatible with import from -'))
1588 1588 if not patchname:
1589 1589 patchname = normname(filename)
1590 1590 self.check_reserved_name(patchname)
1591 1591 if not os.path.isfile(self.join(patchname)):
1592 1592 raise util.Abort(_("patch %s does not exist") % patchname)
1593 1593 else:
1594 1594 try:
1595 1595 if filename == '-':
1596 1596 if not patchname:
1597 1597 raise util.Abort(_('need --name to import a patch from -'))
1598 1598 text = sys.stdin.read()
1599 1599 else:
1600 1600 text = url.open(self.ui, filename).read()
1601 1601 except (OSError, IOError):
1602 1602 raise util.Abort(_("unable to read %s") % filename)
1603 1603 if not patchname:
1604 1604 patchname = normname(os.path.basename(filename))
1605 1605 self.check_reserved_name(patchname)
1606 1606 checkfile(patchname)
1607 1607 patchf = self.opener(patchname, "w")
1608 1608 patchf.write(text)
1609 1609 if not force:
1610 1610 checkseries(patchname)
1611 1611 if patchname not in self.series:
1612 1612 index = self.full_series_end() + i
1613 1613 self.full_series[index:index] = [patchname]
1614 1614 self.parse_series()
1615 1615 self.ui.warn(_("adding %s to series file\n") % patchname)
1616 1616 i += 1
1617 1617 added.append(patchname)
1618 1618 patchname = None
1619 1619 self.series_dirty = 1
1620 1620 qrepo = self.qrepo()
1621 1621 if qrepo:
1622 1622 qrepo.add(added)
1623 1623
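# The linear-path rule enforced above can be pictured on a small history
# (hypothetical revision numbers, no patches applied yet):
#
#   0 -- 1 -- 2 -- 3 (tip)
#         \
#          4
#
# 'hg qimport -r 2:3' succeeds, since 2 and 3 form a linear path to a
# head, while 'hg qimport -r 1' aborts with 'revision 1 is the root of
# more than one branch'.
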
1624 1624 def delete(ui, repo, *patches, **opts):
1625 1625 """remove patches from queue
1626 1626
1627 1627 The patches must not be applied, unless they are arguments to
1628 1628 the --rev parameter. At least one patch or revision is required.
1629 1629
1630 1630 With --rev, mq will stop managing the named revisions (converting
1631 1631 them to regular mercurial changesets). The qfinish command should be
1632 1632 used as an alternative for qdel -r, as the latter option is deprecated.
1633 1633
1634 1634 With --keep, the patch files are preserved in the patch directory."""
1635 1635 q = repo.mq
1636 1636 q.delete(repo, patches, opts)
1637 1637 q.save_dirty()
1638 1638 return 0
1639 1639
1640 1640 def applied(ui, repo, patch=None, **opts):
1641 1641 """print the patches already applied"""
1642 1642 q = repo.mq
1643 1643 if patch:
1644 1644 if patch not in q.series:
1645 1645 raise util.Abort(_("patch %s is not in series file") % patch)
1646 1646 end = q.series.index(patch) + 1
1647 1647 else:
1648 1648 end = q.series_end(True)
1649 1649 return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1650 1650
1651 1651 def unapplied(ui, repo, patch=None, **opts):
1652 1652 """print the patches not yet applied"""
1653 1653 q = repo.mq
1654 1654 if patch:
1655 1655 if patch not in q.series:
1656 1656 raise util.Abort(_("patch %s is not in series file") % patch)
1657 1657 start = q.series.index(patch) + 1
1658 1658 else:
1659 1659 start = q.series_end(True)
1660 1660 q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1661 1661
1662 1662 def qimport(ui, repo, *filename, **opts):
1663 1663 """import a patch
1664 1664
1665 1665 The patch is inserted into the series after the last applied patch.
1666 1666 If no patches have been applied, qimport prepends the patch
1667 1667 to the series.
1668 1668
1669 1669 The patch will have the same name as its source file unless you
1670 1670 give it a new one with --name.
1671 1671
1672 1672 You can register an existing patch inside the patch directory
1673 1673 with the --existing flag.
1674 1674
1675 1675 With --force, an existing patch of the same name will be overwritten.
1676 1676
1677 1677 An existing changeset may be placed under mq control with --rev
1678 1678 (e.g. qimport --rev tip -n patch will place tip under mq control).
1679 1679 With --git, patches imported with --rev will use the git diff
1680 1680 format. See the diffs help topic for information on why this is
1681 1681 important for preserving rename/copy information and permission changes.
1682 1682 """
1683 1683 q = repo.mq
1684 1684 q.qimport(repo, filename, patchname=opts['name'],
1685 1685 existing=opts['existing'], force=opts['force'], rev=opts['rev'],
1686 1686 git=opts['git'])
1687 1687 q.save_dirty()
1688 1688 return 0
1689 1689
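# Illustrative invocations (patch names hypothetical). Note that reading
# from '-' requires an explicit --name, and -e cannot be combined with
# import from stdin:
#
#   hg qimport ../fix-null-deref.diff    (copy a patch file into the queue)
#   hg qimport -e already-present.patch  (register a file already in .hg/patches)
#   hg qimport --name stdin.patch -      (read the patch from standard input)
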
1690 1690 def init(ui, repo, **opts):
1691 1691 """init a new queue repository
1692 1692
1693 1693 The queue repository is unversioned by default. If -c is
1694 1694 specified, qinit will create a separate nested repository
1695 1695 for patches (qinit -c may also be run later to convert
1696 1696 an unversioned patch repository into a versioned one).
1697 1697 You can use qcommit to commit changes to this queue repository."""
1698 1698 q = repo.mq
1699 1699 r = q.init(repo, create=opts['create_repo'])
1700 1700 q.save_dirty()
1701 1701 if r:
1702 1702 if not os.path.exists(r.wjoin('.hgignore')):
1703 1703 fp = r.wopener('.hgignore', 'w')
1704 1704 fp.write('^\\.hg\n')
1705 1705 fp.write('^\\.mq\n')
1706 1706 fp.write('syntax: glob\n')
1707 1707 fp.write('status\n')
1708 1708 fp.write('guards\n')
1709 1709 fp.close()
1710 1710 if not os.path.exists(r.wjoin('series')):
1711 1711 r.wopener('series', 'w').close()
1712 1712 r.add(['.hgignore', 'series'])
1713 1713 commands.add(ui, r)
1714 1714 return 0
1715 1715
1716 1716 def clone(ui, source, dest=None, **opts):
1717 1717     '''clone main and patch repository at the same time
1718 1718
1719 1719 If source is local, destination will have no patches applied. If
1720 1720     source is remote, this command cannot check whether patches are
1721 1721     applied in source, so it cannot guarantee that patches are not
1722 1722     applied in destination. If you clone a remote repository, make
1723 1723     sure beforehand that it has no patches applied.
1724 1724
1725 1725 Source patch repository is looked for in <src>/.hg/patches by
1726 1726 default. Use -p <url> to change.
1727 1727
1728 1728 The patch directory must be a nested mercurial repository, as
1729 1729 would be created by qinit -c.
1730 1730 '''
1731 1731 def patchdir(repo):
1732 1732 url = repo.url()
1733 1733 if url.endswith('/'):
1734 1734 url = url[:-1]
1735 1735 return url + '/.hg/patches'
1736 1736 cmdutil.setremoteconfig(ui, opts)
1737 1737 if dest is None:
1738 1738 dest = hg.defaultdest(source)
1739 1739 sr = hg.repository(ui, ui.expandpath(source))
1740 1740 if opts['patches']:
1741 1741 patchespath = ui.expandpath(opts['patches'])
1742 1742 else:
1743 1743 patchespath = patchdir(sr)
1744 1744 try:
1745 pr = hg.repository(ui, patchespath)
1745 hg.repository(ui, patchespath)
1746 1746 except error.RepoError:
1747 1747 raise util.Abort(_('versioned patch repository not found'
1748 1748 ' (see qinit -c)'))
1749 1749 qbase, destrev = None, None
1750 1750 if sr.local():
1751 1751 if sr.mq.applied:
1752 1752 qbase = bin(sr.mq.applied[0].rev)
1753 1753 if not hg.islocal(dest):
1754 1754 heads = dict.fromkeys(sr.heads())
1755 1755 for h in sr.heads(qbase):
1756 1756 del heads[h]
1757 1757 destrev = heads.keys()
1758 1758 destrev.append(sr.changelog.parents(qbase)[0])
1759 1759 elif sr.capable('lookup'):
1760 1760 try:
1761 1761 qbase = sr.lookup('qbase')
1762 1762 except error.RepoError:
1763 1763 pass
1764 1764 ui.note(_('cloning main repo\n'))
1765 1765 sr, dr = hg.clone(ui, sr.url(), dest,
1766 1766 pull=opts['pull'],
1767 1767 rev=destrev,
1768 1768 update=False,
1769 1769 stream=opts['uncompressed'])
1770 1770 ui.note(_('cloning patch repo\n'))
1771 spr, dpr = hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
1772 pull=opts['pull'], update=not opts['noupdate'],
1773 stream=opts['uncompressed'])
1771 hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
1772 pull=opts['pull'], update=not opts['noupdate'],
1773 stream=opts['uncompressed'])
1774 1774 if dr.local():
1775 1775 if qbase:
1776 1776 ui.note(_('stripping applied patches from destination repo\n'))
1777 1777 dr.mq.strip(dr, qbase, update=False, backup=None)
1778 1778 if not opts['noupdate']:
1779 1779 ui.note(_('updating destination repo\n'))
1780 1780 hg.update(dr, dr.changelog.tip())
1781 1781
1782 1782 def commit(ui, repo, *pats, **opts):
1783 1783 """commit changes in the queue repository"""
1784 1784 q = repo.mq
1785 1785 r = q.qrepo()
1786 1786 if not r: raise util.Abort('no queue repository')
1787 1787 commands.commit(r.ui, r, *pats, **opts)
1788 1788
1789 1789 def series(ui, repo, **opts):
1790 1790 """print the entire series file"""
1791 1791 repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
1792 1792 return 0
1793 1793
1794 1794 def top(ui, repo, **opts):
1795 1795 """print the name of the current patch"""
1796 1796 q = repo.mq
1797 1797 t = q.applied and q.series_end(True) or 0
1798 1798 if t:
1799 1799 return q.qseries(repo, start=t-1, length=1, status='A',
1800 1800 summary=opts.get('summary'))
1801 1801 else:
1802 1802 ui.write(_("no patches applied\n"))
1803 1803 return 1
1804 1804
1805 1805 def next(ui, repo, **opts):
1806 1806 """print the name of the next patch"""
1807 1807 q = repo.mq
1808 1808 end = q.series_end()
1809 1809 if end == len(q.series):
1810 1810 ui.write(_("all patches applied\n"))
1811 1811 return 1
1812 1812 return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1813 1813
1814 1814 def prev(ui, repo, **opts):
1815 1815 """print the name of the previous patch"""
1816 1816 q = repo.mq
1817 1817 l = len(q.applied)
1818 1818 if l == 1:
1819 1819 ui.write(_("only one patch applied\n"))
1820 1820 return 1
1821 1821 if not l:
1822 1822 ui.write(_("no patches applied\n"))
1823 1823 return 1
1824 1824 return q.qseries(repo, start=l-2, length=1, status='A',
1825 1825 summary=opts.get('summary'))
1826 1826
1827 1827 def setupheaderopts(ui, opts):
1828 1828 def do(opt,val):
1829 1829 if not opts[opt] and opts['current' + opt]:
1830 1830 opts[opt] = val
1831 1831 do('user', ui.username())
1832 1832 do('date', "%d %d" % util.makedate())
1833 1833
1834 1834 def new(ui, repo, patch, *args, **opts):
1835 1835 """create a new patch
1836 1836
1837 1837 qnew creates a new patch on top of the currently-applied patch (if any).
1838 1838 It will refuse to run if there are any outstanding changes unless -f is
1839 1839 specified, in which case the patch will be initialized with them. You
1840 1840 may also use -I, -X, and/or a list of files after the patch name to add
1841 1841 only changes to matching files to the new patch, leaving the rest as
1842 1842 uncommitted modifications.
1843 1843
1844 1844 -u and -d can be used to set the (given) user and date, respectively.
1845 1845 -U and -D set user to current user and date to current date.
1846 1846
1847 1847 -e, -m or -l set the patch header as well as the commit message. If none
1848 1848 is specified, the header is empty and the commit message is '[mq]: PATCH'.
1849 1849
1850 1850 Use the --git option to keep the patch in the git extended diff
1851 1851 format. Read the diffs help topic for more information on why this
1852 1852 is important for preserving permission changes and copy/rename
1853 1853 information.
1854 1854 """
1855 1855 msg = cmdutil.logmessage(opts)
1856 1856 def getmsg(): return ui.edit(msg, ui.username())
1857 1857 q = repo.mq
1858 1858 opts['msg'] = msg
1859 1859 if opts.get('edit'):
1860 1860 opts['msg'] = getmsg
1861 1861 else:
1862 1862 opts['msg'] = msg
1863 1863 setupheaderopts(ui, opts)
1864 1864 q.new(repo, patch, *args, **opts)
1865 1865 q.save_dirty()
1866 1866 return 0
1867 1867
1868 1868 def refresh(ui, repo, *pats, **opts):
1869 1869 """update the current patch
1870 1870
1871 1871 If any file patterns are provided, the refreshed patch will contain only
1872 1872 the modifications that match those patterns; the remaining modifications
1873 1873 will remain in the working directory.
1874 1874
1875 1875 If --short is specified, files currently included in the patch will
1876 1876 be refreshed just like matched files and remain in the patch.
1877 1877
1878 1878 hg add/remove/copy/rename work as usual, though you might want to use
1879 1879 git-style patches (--git or [diff] git=1) to track copies and renames.
1880 1880 See the diffs help topic for more information on the git diff format.
1881 1881 """
1882 1882 q = repo.mq
1883 1883 message = cmdutil.logmessage(opts)
1884 1884 if opts['edit']:
1885 1885 if not q.applied:
1886 1886 ui.write(_("no patches applied\n"))
1887 1887 return 1
1888 1888 if message:
1889 1889 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1890 1890 patch = q.applied[-1].name
1891 1891 ph = q.readheaders(patch)
1892 1892 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
1893 1893 setupheaderopts(ui, opts)
1894 1894 ret = q.refresh(repo, pats, msg=message, **opts)
1895 1895 q.save_dirty()
1896 1896 return ret
1897 1897
1898 1898 def diff(ui, repo, *pats, **opts):
1899 1899 """diff of the current patch and subsequent modifications
1900 1900
1901 1901 Shows a diff which includes the current patch as well as any changes which
1902 1902 have been made in the working directory since the last refresh (thus
1903 1903 showing what the current patch would become after a qrefresh).
1904 1904
1905 1905 Use 'hg diff' if you only want to see the changes made since the last
1906 1906 qrefresh, or 'hg export qtip' if you want to see changes made by the
1907 1907 current patch without including changes made since the qrefresh.
1908 1908 """
1909 1909 repo.mq.diff(repo, pats, opts)
1910 1910 return 0
1911 1911
1912 1912 def fold(ui, repo, *files, **opts):
1913 1913 """fold the named patches into the current patch
1914 1914
1915 1915 Patches must not yet be applied. Each patch will be successively
1916 1916 applied to the current patch in the order given. If all the
1917 1917 patches apply successfully, the current patch will be refreshed
1918 1918 with the new cumulative patch, and the folded patches will
1919 1919 be deleted. With -k/--keep, the folded patch files will not
1920 1920 be removed afterwards.
1921 1921
1922 1922 The header for each folded patch will be concatenated with
1923 1923 the current patch header, separated by a line of '* * *'."""
1924 1924
1925 1925 q = repo.mq
1926 1926
1927 1927 if not files:
1928 1928 raise util.Abort(_('qfold requires at least one patch name'))
1929 1929 if not q.check_toppatch(repo):
1930 1930 raise util.Abort(_('No patches applied'))
1931 1931
1932 1932 message = cmdutil.logmessage(opts)
1933 1933 if opts['edit']:
1934 1934 if message:
1935 1935 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1936 1936
1937 1937 parent = q.lookup('qtip')
1938 1938 patches = []
1939 1939 messages = []
1940 1940 for f in files:
1941 1941 p = q.lookup(f)
1942 1942 if p in patches or p == parent:
1943 1943             ui.warn(_('Skipping already folded patch %s\n') % p)
1944 1944 if q.isapplied(p):
1945 1945 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
1946 1946 patches.append(p)
1947 1947
1948 1948 for p in patches:
1949 1949 if not message:
1950 1950 ph = q.readheaders(p)
1951 1951 if ph.message:
1952 1952 messages.append(ph.message)
1953 1953 pf = q.join(p)
1954 1954 (patchsuccess, files, fuzz) = q.patch(repo, pf)
1955 1955 if not patchsuccess:
1956 1956 raise util.Abort(_('Error folding patch %s') % p)
1957 1957 patch.updatedir(ui, repo, files)
1958 1958
1959 1959 if not message:
1960 1960 ph = q.readheaders(parent)
1961 1961 message, user = ph.message, ph.user
1962 1962 for msg in messages:
1963 1963 message.append('* * *')
1964 1964 message.extend(msg)
1965 1965 message = '\n'.join(message)
1966 1966
1967 1967 if opts['edit']:
1968 1968 message = ui.edit(message, user or ui.username())
1969 1969
1970 1970 q.refresh(repo, msg=message)
1971 1971 q.delete(repo, patches, opts)
1972 1972 q.save_dirty()
1973 1973
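# After folding two patches, the combined message built above has this
# shape (patch messages hypothetical):
#
#   message of the current patch
#   * * *
#   message of the first folded patch
#   * * *
#   message of the second folded patch
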
1974 1974 def goto(ui, repo, patch, **opts):
1975 1975 '''push or pop patches until named patch is at top of stack'''
1976 1976 q = repo.mq
1977 1977 patch = q.lookup(patch)
1978 1978 if q.isapplied(patch):
1979 1979 ret = q.pop(repo, patch, force=opts['force'])
1980 1980 else:
1981 1981 ret = q.push(repo, patch, force=opts['force'])
1982 1982 q.save_dirty()
1983 1983 return ret
1984 1984
1985 1985 def guard(ui, repo, *args, **opts):
1986 1986 '''set or print guards for a patch
1987 1987
1988 1988 Guards control whether a patch can be pushed. A patch with no
1989 1989 guards is always pushed. A patch with a positive guard ("+foo") is
1990 1990 pushed only if the qselect command has activated it. A patch with
1991 1991 a negative guard ("-foo") is never pushed if the qselect command
1992 1992 has activated it.
1993 1993
1994 1994 With no arguments, print the currently active guards.
1995 1995 With arguments, set guards for the named patch.
1996 1996 NOTE: Specifying negative guards now requires '--'.
1997 1997
1998 1998 To set guards on another patch:
1999 1999 hg qguard -- other.patch +2.6.17 -stable
2000 2000 '''
2001 2001 def status(idx):
2002 2002 guards = q.series_guards[idx] or ['unguarded']
2003 2003 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
2004 2004 q = repo.mq
2005 2005 patch = None
2006 2006 args = list(args)
2007 2007 if opts['list']:
2008 2008 if args or opts['none']:
2009 2009 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2010 2010 for i in xrange(len(q.series)):
2011 2011 status(i)
2012 2012 return
2013 2013 if not args or args[0][0:1] in '-+':
2014 2014 if not q.applied:
2015 2015 raise util.Abort(_('no patches applied'))
2016 2016 patch = q.applied[-1].name
2017 2017 if patch is None and args[0][0:1] not in '-+':
2018 2018 patch = args.pop(0)
2019 2019 if patch is None:
2020 2020 raise util.Abort(_('no patch to work with'))
2021 2021 if args or opts['none']:
2022 2022 idx = q.find_series(patch)
2023 2023 if idx is None:
2024 2024 raise util.Abort(_('no patch named %s') % patch)
2025 2025 q.set_guards(idx, args)
2026 2026 q.save_dirty()
2027 2027 else:
2028 2028 status(q.series.index(q.lookup(patch)))
2029 2029
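# A standalone sketch of the guard rules described in the docstring above
# (illustrative only, not mq's internal API): a patch with no guards is
# always pushable, any active negative guard blocks it, and a patch with
# positive guards needs at least one of them active.
def guards_allow_push(patch_guards, active):
    # negative guards win: '-foo' blocks the push while 'foo' is active
    if [g for g in patch_guards if g.startswith('-') and g[1:] in active]:
        return False
    pos = [g[1:] for g in patch_guards if g.startswith('+')]
    # with positive guards, at least one must be currently selected
    return not pos or bool([g for g in pos if g in active])

# With the guards from the qguard example:
#   guards_allow_push(['-stable'], set(['stable']))  -> False (skipped)
#   guards_allow_push(['+stable'], set(['stable']))  -> True  (pushed)
#   guards_allow_push(['+stable'], set())            -> False (skipped)
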
2030 2030 def header(ui, repo, patch=None):
2031 2031 """print the header of the topmost or specified patch"""
2032 2032 q = repo.mq
2033 2033
2034 2034 if patch:
2035 2035 patch = q.lookup(patch)
2036 2036 else:
2037 2037 if not q.applied:
2038 2038 ui.write('no patches applied\n')
2039 2039 return 1
2040 2040 patch = q.lookup('qtip')
2041 2041 ph = repo.mq.readheaders(patch)
2042 2042
2043 2043 ui.write('\n'.join(ph.message) + '\n')
2044 2044
2045 2045 def lastsavename(path):
2046 2046 (directory, base) = os.path.split(path)
2047 2047 names = os.listdir(directory)
2048 2048     namere = re.compile(r"%s\.([0-9]+)" % base)
2049 2049 maxindex = None
2050 2050 maxname = None
2051 2051 for f in names:
2052 2052 m = namere.match(f)
2053 2053 if m:
2054 2054 index = int(m.group(1))
2055 2055             if maxindex is None or index > maxindex:
2056 2056 maxindex = index
2057 2057 maxname = f
2058 2058 if maxname:
2059 2059 return (os.path.join(directory, maxname), maxindex)
2060 2060 return (None, None)
2061 2061
2062 2062 def savename(path):
2063 2063 (last, index) = lastsavename(path)
2064 2064 if last is None:
2065 2065 index = 0
2066 2066 newpath = path + ".%d" % (index + 1)
2067 2067 return newpath
2068 2068
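# For example, assuming saved queues patches.1 and patches.2 already
# exist next to .hg/patches:
#
#   lastsavename('.hg/patches')   ->  ('.hg/patches.2', 2)
#   savename('.hg/patches')       ->  '.hg/patches.3'
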
2069 2069 def push(ui, repo, patch=None, **opts):
2070 2070 """push the next patch onto the stack
2071 2071
2072 2072 When --force is applied, all local changes in patched files will be lost.
2073 2073 """
2074 2074 q = repo.mq
2075 2075 mergeq = None
2076 2076
2077 2077 if opts['merge']:
2078 2078 if opts['name']:
2079 2079 newpath = repo.join(opts['name'])
2080 2080 else:
2081 2081 newpath, i = lastsavename(q.path)
2082 2082 if not newpath:
2083 2083 ui.warn(_("no saved queues found, please use -n\n"))
2084 2084 return 1
2085 2085 mergeq = queue(ui, repo.join(""), newpath)
2086 2086 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2087 2087 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2088 2088 mergeq=mergeq, all=opts.get('all'))
2089 2089 return ret
2090 2090
2091 2091 def pop(ui, repo, patch=None, **opts):
2092 2092 """pop the current patch off the stack
2093 2093
2094 2094 By default, pops off the top of the patch stack. If given a patch name,
2095 2095 keeps popping off patches until the named patch is at the top of the stack.
2096 2096 """
2097 2097 localupdate = True
2098 2098 if opts['name']:
2099 2099 q = queue(ui, repo.join(""), repo.join(opts['name']))
2100 2100 ui.warn(_('using patch queue: %s\n') % q.path)
2101 2101 localupdate = False
2102 2102 else:
2103 2103 q = repo.mq
2104 2104 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2105 2105 all=opts['all'])
2106 2106 q.save_dirty()
2107 2107 return ret
2108 2108
2109 2109 def rename(ui, repo, patch, name=None, **opts):
2110 2110 """rename a patch
2111 2111
2112 2112 With one argument, renames the current patch to PATCH1.
2113 2113 With two arguments, renames PATCH1 to PATCH2."""
2114 2114
2115 2115 q = repo.mq
2116 2116
2117 2117 if not name:
2118 2118 name = patch
2119 2119 patch = None
2120 2120
2121 2121 if patch:
2122 2122 patch = q.lookup(patch)
2123 2123 else:
2124 2124 if not q.applied:
2125 2125 ui.write(_('no patches applied\n'))
2126 2126 return
2127 2127 patch = q.lookup('qtip')
2128 2128 absdest = q.join(name)
2129 2129 if os.path.isdir(absdest):
2130 2130 name = normname(os.path.join(name, os.path.basename(patch)))
2131 2131 absdest = q.join(name)
2132 2132 if os.path.exists(absdest):
2133 2133 raise util.Abort(_('%s already exists') % absdest)
2134 2134
2135 2135 if name in q.series:
2136 2136 raise util.Abort(_('A patch named %s already exists in the series file') % name)
2137 2137
2138 2138 if ui.verbose:
2139 2139 ui.write('renaming %s to %s\n' % (patch, name))
2140 2140 i = q.find_series(patch)
2141 2141 guards = q.guard_re.findall(q.full_series[i])
2142 2142 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2143 2143 q.parse_series()
2144 2144 q.series_dirty = 1
2145 2145
2146 2146 info = q.isapplied(patch)
2147 2147 if info:
2148 2148 q.applied[info[0]] = statusentry(info[1], name)
2149 2149 q.applied_dirty = 1
2150 2150
2151 2151 util.rename(q.join(patch), absdest)
2152 2152 r = q.qrepo()
2153 2153 if r:
2154 2154 wlock = r.wlock()
2155 2155 try:
2156 2156 if r.dirstate[patch] == 'a':
2157 2157 r.dirstate.forget(patch)
2158 2158 r.dirstate.add(name)
2159 2159 else:
2160 2160 if r.dirstate[name] == 'r':
2161 2161 r.undelete([name])
2162 2162 r.copy(patch, name)
2163 2163 r.remove([patch], False)
2164 2164 finally:
2165 2165 del wlock
2166 2166
2167 2167 q.save_dirty()
2168 2168
2169 2169 def restore(ui, repo, rev, **opts):
2170 2170 """restore the queue state saved by a rev"""
2171 2171 rev = repo.lookup(rev)
2172 2172 q = repo.mq
2173 2173 q.restore(repo, rev, delete=opts['delete'],
2174 2174 qupdate=opts['update'])
2175 2175 q.save_dirty()
2176 2176 return 0
2177 2177
2178 2178 def save(ui, repo, **opts):
2179 2179 """save current queue state"""
2180 2180 q = repo.mq
2181 2181 message = cmdutil.logmessage(opts)
2182 2182 ret = q.save(repo, msg=message)
2183 2183 if ret:
2184 2184 return ret
2185 2185 q.save_dirty()
2186 2186 if opts['copy']:
2187 2187 path = q.path
2188 2188 if opts['name']:
2189 2189 newpath = os.path.join(q.basepath, opts['name'])
2190 2190 if os.path.exists(newpath):
2191 2191 if not os.path.isdir(newpath):
2192 2192 raise util.Abort(_('destination %s exists and is not '
2193 2193 'a directory') % newpath)
2194 2194 if not opts['force']:
2195 2195 raise util.Abort(_('destination %s exists, '
2196 2196 'use -f to force') % newpath)
2197 2197 else:
2198 2198 newpath = savename(path)
2199 2199 ui.warn(_("copy %s to %s\n") % (path, newpath))
2200 2200 util.copyfiles(path, newpath)
2201 2201 if opts['empty']:
2202 2202 try:
2203 2203 os.unlink(q.join(q.status_path))
2204 2204 except:
2205 2205 pass
2206 2206 return 0
2207 2207
2208 2208 def strip(ui, repo, rev, **opts):
2209 2209 """strip a revision and all its descendants from the repository
2210 2210
2211 2211 If one of the working dir's parent revisions is stripped, the working
2212 2212 directory will be updated to the parent of the stripped revision.
2213 2213 """
2214 2214 backup = 'all'
2215 2215 if opts['backup']:
2216 2216 backup = 'strip'
2217 2217 elif opts['nobackup']:
2218 2218 backup = 'none'
2219 2219
2220 2220 rev = repo.lookup(rev)
2221 2221 p = repo.dirstate.parents()
2222 2222 cl = repo.changelog
2223 2223 update = True
2224 2224 if p[0] == nullid:
2225 2225 update = False
2226 2226 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2227 2227 update = False
2228 2228 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2229 2229 update = False
2230 2230
2231 2231 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2232 2232 return 0
2233 2233
2234 2234 def select(ui, repo, *args, **opts):
2235 2235 '''set or print guarded patches to push
2236 2236
2237 2237 Use the qguard command to set or print guards on patch, then use
2238 2238 qselect to tell mq which guards to use. A patch will be pushed if it
2239 2239 has no guards or any positive guards match the currently selected guard,
2240 2240 but will not be pushed if any negative guards match the current guard.
2241 2241 For example:
2242 2242
2243 2243 qguard foo.patch -stable (negative guard)
2244 2244 qguard bar.patch +stable (positive guard)
2245 2245 qselect stable
2246 2246
2247 2247 This activates the "stable" guard. mq will skip foo.patch (because
2248 2248 it has a negative match) but push bar.patch (because it
2249 2249 has a positive match).
2250 2250
2251 2251 With no arguments, prints the currently active guards.
2252 2252 With one argument, sets the active guard.
2253 2253
2254 2254 Use -n/--none to deactivate guards (no other arguments needed).
2255 2255 When no guards are active, patches with positive guards are skipped
2256 2256 and patches with negative guards are pushed.
2257 2257
2258 2258 qselect can change the guards on applied patches. It does not pop
2259 2259 guarded patches by default. Use --pop to pop back to the last applied
2260 2260 patch that is not guarded. Use --reapply (which implies --pop) to push
2261 2261 back to the current patch afterwards, but skip guarded patches.
2262 2262
2263 2263 Use -s/--series to print a list of all guards in the series file (no
2264 2264 other arguments needed). Use -v for more information.'''
2265 2265
2266 2266 q = repo.mq
2267 2267 guards = q.active()
2268 2268 if args or opts['none']:
2269 2269 old_unapplied = q.unapplied(repo)
2270 2270 old_guarded = [i for i in xrange(len(q.applied)) if
2271 2271 not q.pushable(i)[0]]
2272 2272 q.set_active(args)
2273 2273 q.save_dirty()
2274 2274 if not args:
2275 2275 ui.status(_('guards deactivated\n'))
2276 2276 if not opts['pop'] and not opts['reapply']:
2277 2277 unapplied = q.unapplied(repo)
2278 2278 guarded = [i for i in xrange(len(q.applied))
2279 2279 if not q.pushable(i)[0]]
2280 2280 if len(unapplied) != len(old_unapplied):
2281 2281 ui.status(_('number of unguarded, unapplied patches has '
2282 2282 'changed from %d to %d\n') %
2283 2283 (len(old_unapplied), len(unapplied)))
2284 2284 if len(guarded) != len(old_guarded):
2285 2285 ui.status(_('number of guarded, applied patches has changed '
2286 2286 'from %d to %d\n') %
2287 2287 (len(old_guarded), len(guarded)))
2288 2288 elif opts['series']:
2289 2289 guards = {}
2290 2290 noguards = 0
2291 2291 for gs in q.series_guards:
2292 2292 if not gs:
2293 2293 noguards += 1
2294 2294 for g in gs:
2295 2295 guards.setdefault(g, 0)
2296 2296 guards[g] += 1
2297 2297 if ui.verbose:
2298 2298 guards['NONE'] = noguards
2299 2299 guards = guards.items()
2300 2300 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2301 2301 if guards:
2302 2302 ui.note(_('guards in series file:\n'))
2303 2303 for guard, count in guards:
2304 2304 ui.note('%2d ' % count)
2305 2305 ui.write(guard, '\n')
2306 2306 else:
2307 2307 ui.note(_('no guards in series file\n'))
2308 2308 else:
2309 2309 if guards:
2310 2310 ui.note(_('active guards:\n'))
2311 2311 for g in guards:
2312 2312 ui.write(g, '\n')
2313 2313 else:
2314 2314 ui.write(_('no active guards\n'))
2315 2315 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2316 2316 popped = False
2317 2317 if opts['pop'] or opts['reapply']:
2318 2318 for i in xrange(len(q.applied)):
2319 2319 pushable, reason = q.pushable(i)
2320 2320 if not pushable:
2321 2321 ui.status(_('popping guarded patches\n'))
2322 2322 popped = True
2323 2323 if i == 0:
2324 2324 q.pop(repo, all=True)
2325 2325 else:
2326 2326 q.pop(repo, i-1)
2327 2327 break
2328 2328 if popped:
2329 2329 try:
2330 2330 if reapply:
2331 2331 ui.status(_('reapplying unguarded patches\n'))
2332 2332 q.push(repo, reapply)
2333 2333 finally:
2334 2334 q.save_dirty()
2335 2335
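# Typical qselect usage, reusing the guard names from the qguard example:
#
#   hg qselect stable            (activate the "stable" guard)
#   hg qselect                   (print the currently active guards)
#   hg qselect -n                (deactivate all guards)
#   hg qselect --reapply stable  (switch guards, pop guarded patches,
#                                 then push back, skipping guarded ones)
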
2336 2336 def finish(ui, repo, *revrange, **opts):
2337 2337 """move applied patches into repository history
2338 2338
2339 2339 Finishes the specified revisions (corresponding to applied patches) by
2340 2340 moving them out of mq control into regular repository history.
2341 2341
2342 2342 Accepts a revision range or the --applied option. If --applied is
2343 2343 specified, all applied mq revisions are removed from mq control.
2344 2344 Otherwise, the given revisions must be at the base of the stack of
2345 2345 applied patches.
2346 2346
2347 2347 This can be especially useful if your changes have been applied to an
2348 2348 upstream repository, or if you are about to push your changes to upstream.
2349 2349 """
2350 2350 if not opts['applied'] and not revrange:
2351 2351 raise util.Abort(_('no revisions specified'))
2352 2352 elif opts['applied']:
2353 2353 revrange = ('qbase:qtip',) + revrange
2354 2354
2355 2355 q = repo.mq
2356 2356 if not q.applied:
2357 2357 ui.status(_('no patches applied\n'))
2358 2358 return 0
2359 2359
2360 2360 revs = cmdutil.revrange(repo, revrange)
2361 2361 q.finish(repo, revs)
2362 2362 q.save_dirty()
2363 2363 return 0
2364 2364
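# For instance, using the qbase/qtip tags that mq provides while patches
# are applied:
#
#   hg qfinish -a        (move every applied patch into regular history)
#   hg qfinish qbase     (finish only the patch at the base of the stack)
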
2365 2365 def reposetup(ui, repo):
2366 2366 class mqrepo(repo.__class__):
2367 2367 def abort_if_wdir_patched(self, errmsg, force=False):
2368 2368 if self.mq.applied and not force:
2369 2369 parent = hex(self.dirstate.parents()[0])
2370 2370 if parent in [s.rev for s in self.mq.applied]:
2371 2371 raise util.Abort(errmsg)
2372 2372
2373 2373 def commit(self, *args, **opts):
2374 2374 if len(args) >= 6:
2375 2375 force = args[5]
2376 2376 else:
2377 2377 force = opts.get('force')
2378 2378 self.abort_if_wdir_patched(
2379 2379 _('cannot commit over an applied mq patch'),
2380 2380 force)
2381 2381
2382 2382 return super(mqrepo, self).commit(*args, **opts)
2383 2383
2384 2384 def push(self, remote, force=False, revs=None):
2385 2385 if self.mq.applied and not force and not revs:
2386 2386 raise util.Abort(_('source has mq patches applied'))
2387 2387 return super(mqrepo, self).push(remote, force, revs)
2388 2388
2389 2389 def tags(self):
2390 2390 if self.tagscache:
2391 2391 return self.tagscache
2392 2392
2393 2393 tagscache = super(mqrepo, self).tags()
2394 2394
2395 2395 q = self.mq
2396 2396 if not q.applied:
2397 2397 return tagscache
2398 2398
2399 2399 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2400 2400
2401 2401 if mqtags[-1][0] not in self.changelog.nodemap:
2402 2402 self.ui.warn(_('mq status file refers to unknown node %s\n')
2403 2403 % short(mqtags[-1][0]))
2404 2404 return tagscache
2405 2405
2406 2406 mqtags.append((mqtags[-1][0], 'qtip'))
2407 2407 mqtags.append((mqtags[0][0], 'qbase'))
2408 2408 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2409 2409 for patch in mqtags:
2410 2410 if patch[1] in tagscache:
2411 2411 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2412 2412 % patch[1])
2413 2413 else:
2414 2414 tagscache[patch[1]] = patch[0]
2415 2415
2416 2416 return tagscache
2417 2417
2418 2418 def _branchtags(self, partial, lrev):
2419 2419 q = self.mq
2420 2420 if not q.applied:
2421 2421 return super(mqrepo, self)._branchtags(partial, lrev)
2422 2422
2423 2423 cl = self.changelog
2424 2424 qbasenode = bin(q.applied[0].rev)
2425 2425 if qbasenode not in cl.nodemap:
2426 2426 self.ui.warn(_('mq status file refers to unknown node %s\n')
2427 2427 % short(qbasenode))
2428 2428 return super(mqrepo, self)._branchtags(partial, lrev)
2429 2429
2430 2430 qbase = cl.rev(qbasenode)
2431 2431 start = lrev + 1
2432 2432 if start < qbase:
2433 2433 # update the cache (excluding the patches) and save it
2434 2434 self._updatebranchcache(partial, lrev+1, qbase)
2435 2435 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2436 2436 start = qbase
2437 2437 # if start = qbase, the cache is as updated as it should be.
2438 2438 # if start > qbase, the cache includes (part of) the patches.
2439 2439 # we might as well use it, but we won't save it.
2440 2440
2441 2441 # update the cache up to the tip
2442 2442 self._updatebranchcache(partial, start, len(cl))
2443 2443
2444 2444 return partial
2445 2445
2446 2446 if repo.local():
2447 2447 repo.__class__ = mqrepo
2448 2448 repo.mq = queue(ui, repo.join(""))
2449 2449
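# The tags() override above means that, while patches are applied,
# revision names like these resolve in any Mercurial command:
#
#   hg log -r qbase      (first applied mq patch)
#   hg log -r qtip       (last applied mq patch)
#   hg log -r qparent    (parent of qbase, the last regular changeset)
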
2450 2450 def mqimport(orig, ui, repo, *args, **kwargs):
2451 2451 if hasattr(repo, 'abort_if_wdir_patched'):
2452 2452 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2453 2453 kwargs.get('force'))
2454 2454 return orig(ui, repo, *args, **kwargs)
2455 2455
2456 2456 def uisetup(ui):
2457 2457 extensions.wrapcommand(commands.table, 'import', mqimport)
2458 2458
2459 2459 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2460 2460
2461 2461 cmdtable = {
2462 2462 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2463 2463 "qclone":
2464 2464 (clone,
2465 2465 [('', 'pull', None, _('use pull protocol to copy metadata')),
2466 2466 ('U', 'noupdate', None, _('do not update the new working directories')),
2467 2467 ('', 'uncompressed', None,
2468 2468 _('use uncompressed transfer (fast over LAN)')),
2469 2469 ('p', 'patches', '', _('location of source patch repo')),
2470 2470 ] + commands.remoteopts,
2471 2471 _('hg qclone [OPTION]... SOURCE [DEST]')),
2472 2472 "qcommit|qci":
2473 2473 (commit,
2474 2474 commands.table["^commit|ci"][1],
2475 2475 _('hg qcommit [OPTION]... [FILE]...')),
2476 2476 "^qdiff":
2477 2477 (diff,
2478 2478 commands.diffopts + commands.diffopts2 + commands.walkopts,
2479 2479 _('hg qdiff [OPTION]... [FILE]...')),
2480 2480 "qdelete|qremove|qrm":
2481 2481 (delete,
2482 2482 [('k', 'keep', None, _('keep patch file')),
2483 2483 ('r', 'rev', [], _('stop managing a revision'))],
2484 2484 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2485 2485 'qfold':
2486 2486 (fold,
2487 2487 [('e', 'edit', None, _('edit patch header')),
2488 2488 ('k', 'keep', None, _('keep folded patch files')),
2489 2489 ] + commands.commitopts,
2490 2490 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2491 2491 'qgoto':
2492 2492 (goto,
2493 2493 [('f', 'force', None, _('overwrite any local changes'))],
2494 2494 _('hg qgoto [OPTION]... PATCH')),
2495 2495 'qguard':
2496 2496 (guard,
2497 2497 [('l', 'list', None, _('list all patches and guards')),
2498 2498 ('n', 'none', None, _('drop all guards'))],
2499 2499 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2500 2500 'qheader': (header, [], _('hg qheader [PATCH]')),
2501 2501 "^qimport":
2502 2502 (qimport,
2503 2503 [('e', 'existing', None, _('import file in patch dir')),
2504 2504 ('n', 'name', '', _('patch file name')),
2505 2505 ('f', 'force', None, _('overwrite existing files')),
2506 2506 ('r', 'rev', [], _('place existing revisions under mq control')),
2507 2507 ('g', 'git', None, _('use git extended diff format'))],
2508 2508 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2509 2509 "^qinit":
2510 2510 (init,
2511 2511 [('c', 'create-repo', None, _('create queue repository'))],
2512 2512 _('hg qinit [-c]')),
2513 2513 "qnew":
2514 2514 (new,
2515 2515 [('e', 'edit', None, _('edit commit message')),
2516 2516 ('f', 'force', None, _('import uncommitted changes into patch')),
2517 2517 ('g', 'git', None, _('use git extended diff format')),
2518 2518 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2519 2519 ('u', 'user', '', _('add "From: <given user>" to patch')),
2520 2520 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2521 2521 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2522 2522 ] + commands.walkopts + commands.commitopts,
2523 2523 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2524 2524 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2525 2525 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2526 2526 "^qpop":
2527 2527 (pop,
2528 2528 [('a', 'all', None, _('pop all patches')),
2529 2529 ('n', 'name', '', _('queue name to pop')),
2530 2530 ('f', 'force', None, _('forget any local changes'))],
2531 2531 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2532 2532 "^qpush":
2533 2533 (push,
2534 2534 [('f', 'force', None, _('apply if the patch has rejects')),
2535 2535 ('l', 'list', None, _('list patch name in commit text')),
2536 2536 ('a', 'all', None, _('apply all patches')),
2537 2537 ('m', 'merge', None, _('merge from another queue')),
2538 2538 ('n', 'name', '', _('merge queue name'))],
2539 2539 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2540 2540 "^qrefresh":
2541 2541 (refresh,
2542 2542 [('e', 'edit', None, _('edit commit message')),
2543 2543 ('g', 'git', None, _('use git extended diff format')),
2544 2544 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2545 2545 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2546 2546 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2547 2547 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2548 2548 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2549 2549 ] + commands.walkopts + commands.commitopts,
2550 2550 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2551 2551 'qrename|qmv':
2552 2552 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2553 2553 "qrestore":
2554 2554 (restore,
2555 2555 [('d', 'delete', None, _('delete save entry')),
2556 2556 ('u', 'update', None, _('update queue working dir'))],
2557 2557 _('hg qrestore [-d] [-u] REV')),
2558 2558 "qsave":
2559 2559 (save,
2560 2560 [('c', 'copy', None, _('copy patch directory')),
2561 2561 ('n', 'name', '', _('copy directory name')),
2562 2562 ('e', 'empty', None, _('clear queue status file')),
2563 2563 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2564 2564 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2565 2565 "qselect":
2566 2566 (select,
2567 2567 [('n', 'none', None, _('disable all guards')),
2568 2568 ('s', 'series', None, _('list all guards in series file')),
2569 2569 ('', 'pop', None, _('pop to before first guarded applied patch')),
2570 2570 ('', 'reapply', None, _('pop, then reapply patches'))],
2571 2571 _('hg qselect [OPTION]... [GUARD]...')),
2572 2572 "qseries":
2573 2573 (series,
2574 2574 [('m', 'missing', None, _('print patches not in series')),
2575 2575 ] + seriesopts,
2576 2576 _('hg qseries [-ms]')),
2577 2577 "^strip":
2578 2578 (strip,
2579 2579 [('f', 'force', None, _('force removal with local changes')),
2580 2580 ('b', 'backup', None, _('bundle unrelated changesets')),
2581 2581 ('n', 'nobackup', None, _('no backups'))],
2582 2582 _('hg strip [-f] [-b] [-n] REV')),
2583 2583 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2584 2584 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2585 2585 "qfinish":
2586 2586 (finish,
2587 2587 [('a', 'applied', None, _('finish all applied changesets'))],
2588 2588 _('hg qfinish [-a] [REV...]')),
2589 2589 }
@@ -1,480 +1,480 b''
1 1 '''sending Mercurial changesets as a series of patch emails
2 2
3 3 The series is started off with a "[PATCH 0 of N]" introduction,
4 4 which describes the series as a whole.
5 5
6 6 Each patch email has a Subject line of "[PATCH M of N] ...", using
7 7 the first line of the changeset description as the subject text.
8 8 The message contains two or three body parts:
9 9
10 10 The remainder of the changeset description.
11 11
12 12 [Optional] The result of running diffstat on the patch.
13 13
14 14 The patch itself, as generated by "hg export".
15 15
16 16 Each message refers to all of its predecessors using the In-Reply-To
17 17 and References headers, so they will show up as a sequence in
18 18 threaded mail and news readers, and in mail archives.
19 19
20 20 For each changeset, you will be prompted with a diffstat summary and
21 21 the changeset summary, so you can be sure you are sending the right changes.
22 22
23 23 To enable this extension:
24 24
25 25 [extensions]
26 26 hgext.patchbomb =
27 27
28 28 To configure other defaults, add a section like this to your hgrc file:
29 29
30 30 [email]
31 31 from = My Name <my@email>
32 32 to = recipient1, recipient2, ...
33 33 cc = cc1, cc2, ...
34 34 bcc = bcc1, bcc2, ...
35 35
36 36 Then you can use the "hg email" command to mail a series of changesets
37 37 as a patchbomb.
38 38
39 39 To avoid sending patches prematurely, it is a good idea to first run
40 40 the "email" command with the "-n" option (test only). You will be
41 41 prompted for an email recipient address, a subject and an introductory
42 42 message describing the patches of your patchbomb. When all is
43 43 done, patchbomb messages are displayed. If the PAGER environment variable
44 44 is set, your pager will be fired up once for each patchbomb message, so
45 45 you can verify everything is alright.
46 46
47 47 The "-m" (mbox) option is also very useful. Instead of previewing
48 48 each patchbomb message in a pager or sending the messages directly,
49 49 it will create a UNIX mailbox file with the patch emails. This
50 50 mailbox file can be previewed with any mail user agent which supports
51 51 UNIX mbox files, e.g. with mutt:
52 52
53 53 % mutt -R -f mbox
54 54
55 55 When you are previewing the patchbomb messages, you can use `formail'
56 56 (a utility that is commonly installed as part of the procmail package)
57 57 to send each message out:
58 58
59 59 % formail -s sendmail -bm -t < mbox
60 60
61 61 That should be all. Now your patchbomb is on its way out.
62 62
63 63 You can also either configure the method option in the email section
64 64 to be a sendmail compatible mailer or fill out the [smtp] section so
65 65 that the patchbomb extension can automatically send patchbombs directly
66 66 from the command line. See the [email] and [smtp] sections in hgrc(5)
67 67 for details.'''
68 68
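# The [smtp] section mentioned above looks roughly like this (host and
# credentials hypothetical; see hgrc(5) for the complete list of keys):
#
#   [smtp]
#   host = smtp.example.com
#   port = 587
#   tls = true
#   username = me@example.com
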
69 69 import os, errno, socket, tempfile, cStringIO
70 70 import email.MIMEMultipart, email.MIMEBase
71 71 import email.Utils, email.Encoders, email.Generator
72 72 from mercurial import cmdutil, commands, hg, mail, patch, util
73 73 from mercurial.i18n import _
74 74 from mercurial.node import bin
75 75
76 76 def prompt(ui, prompt, default=None, rest=': ', empty_ok=False):
77 77 if not ui.interactive:
78 78 return default
79 79 if default:
80 80 prompt += ' [%s]' % default
81 81 prompt += rest
82 82 while True:
83 83 r = ui.prompt(prompt, default=default)
84 84 if r:
85 85 return r
86 86 if default is not None:
87 87 return default
88 88 if empty_ok:
89 89 return r
90 90 ui.warn(_('Please enter a valid value.\n'))
91 91
92 92 def cdiffstat(ui, summary, patchlines):
93 93 s = patch.diffstat(patchlines)
94 94 if summary:
95 95 ui.write(summary, '\n')
96 96 ui.write(s, '\n')
97 97 ans = prompt(ui, _('does the diffstat above look okay? '), 'y')
98 98 if not ans.lower().startswith('y'):
99 99 raise util.Abort(_('diffstat rejected'))
100 100 return s
101 101
102 102 def makepatch(ui, repo, patch, opts, _charsets, idx, total, patchname=None):
103 103
104 104 desc = []
105 105 node = None
106 106 body = ''
107 107
108 108 for line in patch:
109 109 if line.startswith('#'):
110 110 if line.startswith('# Node ID'):
111 111 node = line.split()[-1]
112 112 continue
113 113 if line.startswith('diff -r') or line.startswith('diff --git'):
114 114 break
115 115 desc.append(line)
116 116
117 117 if not patchname and not node:
118 118 raise ValueError
119 119
120 120 if opts.get('attach'):
121 121 body = ('\n'.join(desc[1:]).strip() or
122 122 'Patch subject is complete summary.')
123 123 body += '\n\n\n'
124 124
125 125 if opts.get('plain'):
126 126 while patch and patch[0].startswith('# '):
127 127 patch.pop(0)
128 128 if patch:
129 129 patch.pop(0)
130 130 while patch and not patch[0].strip():
131 131 patch.pop(0)
132 132
133 133 if opts.get('diffstat'):
134 134 body += cdiffstat(ui, '\n'.join(desc), patch) + '\n\n'
135 135
136 136 if opts.get('attach') or opts.get('inline'):
137 137 msg = email.MIMEMultipart.MIMEMultipart()
138 138 if body:
139 139 msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
140 140 p = mail.mimetextpatch('\n'.join(patch), 'x-patch', opts.get('test'))
141 141 binnode = bin(node)
142 142 # if node is mq patch, it will have patch file name as tag
143 143 if not patchname:
144 144 patchtags = [t for t in repo.nodetags(binnode)
145 145 if t.endswith('.patch') or t.endswith('.diff')]
146 146 if patchtags:
147 147 patchname = patchtags[0]
148 148 elif total > 1:
149 149 patchname = cmdutil.make_filename(repo, '%b-%n.patch',
150 150 binnode, seqno=idx, total=total)
151 151 else:
152 152 patchname = cmdutil.make_filename(repo, '%b.patch', binnode)
153 153 disposition = 'inline'
154 154 if opts.get('attach'):
155 155 disposition = 'attachment'
156 156 p['Content-Disposition'] = disposition + '; filename=' + patchname
157 157 msg.attach(p)
158 158 else:
159 159 body += '\n'.join(patch)
160 160 msg = mail.mimetextpatch(body, display=opts.get('test'))
161 161
162 162 subj = desc[0].strip().rstrip('. ')
163 163 if total == 1 and not opts.get('intro'):
164 164 subj = '[PATCH] ' + (opts.get('subject') or subj)
165 165 else:
166 166 tlen = len(str(total))
167 167 subj = '[PATCH %0*d of %d] %s' % (tlen, idx, total, subj)
168 168 msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
169 169 msg['X-Mercurial-Node'] = node
170 170 return msg, subj
171 171
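# The zero-padded numbering above yields subjects such as (description
# hypothetical):
#
#   [PATCH 03 of 12] first line of the changeset description
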
172 172 def patchbomb(ui, repo, *revs, **opts):
173 173 '''send changesets by email
174 174
175 175 By default, diffs are sent in the format generated by hg export,
176 176 one per message. The series starts with a "[PATCH 0 of N]"
177 177 introduction, which describes the series as a whole.
178 178
179 179 Each patch email has a Subject line of "[PATCH M of N] ...", using
180 180 the first line of the changeset description as the subject text.
181 181 The message contains two or three body parts. First, the rest of
182 182 the changeset description. Next, (optionally) if the diffstat
183 183 program is installed, the result of running diffstat on the patch.
184 184 Finally, the patch itself, as generated by "hg export".
185 185
186 186 With --outgoing, emails will be generated for patches not
187 187 found in the destination repository (or only those which are
188 188 ancestors of the specified revisions if any are provided)
189 189
190 190 With --bundle, changesets are selected as for --outgoing,
191 191 but a single email containing a binary Mercurial bundle as an
192 192 attachment will be sent.
193 193
194 194 Examples:
195 195
196 196 hg email -r 3000 # send patch 3000 only
197 197 hg email -r 3000 -r 3001 # send patches 3000 and 3001
198 198 hg email -r 3000:3005 # send patches 3000 through 3005
199 199 hg email 3000 # send patch 3000 (deprecated)
200 200
201 201 hg email -o # send all patches not in default
202 202 hg email -o DEST # send all patches not in DEST
203 203 hg email -o -r 3000 # send all ancestors of 3000 not in default
204 204 hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST
205 205
206 206 hg email -b # send bundle of all patches not in default
207 207 hg email -b DEST # send bundle of all patches not in DEST
208 208 hg email -b -r 3000 # bundle of all ancestors of 3000 not in default
209 209 hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST
210 210
211 211 Before using this command, you will need to enable email in your hgrc.
212 212 See the [email] section in hgrc(5) for details.
213 213 '''
214 214
215 215 _charsets = mail._charsets(ui)
216 216
217 217 def outgoing(dest, revs):
218 218 '''Return the revisions present locally but not in dest'''
219 219 dest = ui.expandpath(dest or 'default-push', dest or 'default')
220 220 revs = [repo.lookup(rev) for rev in revs]
221 221 other = hg.repository(ui, dest)
222 222 ui.status(_('comparing with %s\n') % dest)
223 223 o = repo.findoutgoing(other)
224 224 if not o:
225 225 ui.status(_("no changes found\n"))
226 226 return []
227 227 o = repo.changelog.nodesbetween(o, revs or None)[0]
228 228 return [str(repo.changelog.rev(r)) for r in o]
229 229
230 230 def getpatches(revs):
231 231 for r in cmdutil.revrange(repo, revs):
232 232 output = cStringIO.StringIO()
233 p = patch.export(repo, [r], fp=output,
234 opts=patch.diffopts(ui, opts))
233 patch.export(repo, [r], fp=output,
234 opts=patch.diffopts(ui, opts))
235 235 yield output.getvalue().split('\n')
236 236
237 237 def getbundle(dest):
238 238 tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
239 239 tmpfn = os.path.join(tmpdir, 'bundle')
240 240 try:
241 241 commands.bundle(ui, repo, tmpfn, dest, **opts)
242 242 return open(tmpfn, 'rb').read()
243 243 finally:
244 244 try:
245 245 os.unlink(tmpfn)
246 246 except:
247 247 pass
248 248 os.rmdir(tmpdir)
249 249
250 250 if not (opts.get('test') or opts.get('mbox')):
251 251 # really sending
252 252 mail.validateconfig(ui)
253 253
254 254 if not (revs or opts.get('rev')
255 255 or opts.get('outgoing') or opts.get('bundle')
256 256 or opts.get('patches')):
257 257 raise util.Abort(_('specify at least one changeset with -r or -o'))
258 258
259 259 cmdutil.setremoteconfig(ui, opts)
260 260 if opts.get('outgoing') and opts.get('bundle'):
261 261 raise util.Abort(_("--outgoing mode always on with --bundle;"
262 262 " do not re-specify --outgoing"))
263 263
264 264 if opts.get('outgoing') or opts.get('bundle'):
265 265 if len(revs) > 1:
266 266 raise util.Abort(_("too many destinations"))
267 267 dest = revs and revs[0] or None
268 268 revs = []
269 269
270 270 if opts.get('rev'):
271 271 if revs:
272 272 raise util.Abort(_('use only one form to specify the revision'))
273 273 revs = opts.get('rev')
274 274
275 275 if opts.get('outgoing'):
276 276 revs = outgoing(dest, opts.get('rev'))
277 277 if opts.get('bundle'):
278 278 opts['revs'] = revs
279 279
280 280 # start
281 281 if opts.get('date'):
282 282 start_time = util.parsedate(opts.get('date'))
283 283 else:
284 284 start_time = util.makedate()
285 285
286 286 def genmsgid(id):
287 287 return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())
288 288
289 289 def getdescription(body, sender):
290 290 if opts.get('desc'):
291 291 body = open(opts.get('desc')).read()
292 292 else:
293 293 ui.write(_('\nWrite the introductory message for the '
294 294 'patch series.\n\n'))
295 295 body = ui.edit(body, sender)
296 296 return body
297 297
298 298 def getpatchmsgs(patches, patchnames=None):
299 299 jumbo = []
300 300 msgs = []
301 301
302 302 ui.write(_('This patch series consists of %d patches.\n\n')
303 303 % len(patches))
304 304
305 305 name = None
306 306 for i, p in enumerate(patches):
307 307 jumbo.extend(p)
308 308 if patchnames:
309 309 name = patchnames[i]
310 310 msg = makepatch(ui, repo, p, opts, _charsets, i + 1,
311 311 len(patches), name)
312 312 msgs.append(msg)
313 313
314 314 if len(patches) > 1 or opts.get('intro'):
315 315 tlen = len(str(len(patches)))
316 316
317 317 subj = '[PATCH %0*d of %d] %s' % (
318 318 tlen, 0, len(patches),
319 319 opts.get('subject') or
320 320 prompt(ui, 'Subject:',
321 321 rest=' [PATCH %0*d of %d] ' % (tlen, 0, len(patches))))
322 322
323 323 body = ''
324 324 if opts.get('diffstat'):
325 325 d = cdiffstat(ui, _('Final summary:\n'), jumbo)
326 326 if d:
327 327 body = '\n' + d
328 328
329 329 body = getdescription(body, sender)
330 330 msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
331 331 msg['Subject'] = mail.headencode(ui, subj, _charsets,
332 332 opts.get('test'))
333 333
334 334 msgs.insert(0, (msg, subj))
335 335 return msgs
336 336
337 337 def getbundlemsgs(bundle):
338 338 subj = (opts.get('subject')
339 339 or prompt(ui, 'Subject:', 'A bundle for your repository'))
340 340
341 341 body = getdescription('', sender)
342 342 msg = email.MIMEMultipart.MIMEMultipart()
343 343 if body:
344 344 msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
345 345 datapart = email.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
346 346 datapart.set_payload(bundle)
347 347 datapart.add_header('Content-Disposition', 'attachment',
348 348 filename='bundle.hg')
349 349 email.Encoders.encode_base64(datapart)
350 350 msg.attach(datapart)
351 351 msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
352 352 return [(msg, subj)]
353 353
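The MIME assembly in getbundlemsgs is plain stdlib work. A minimal standalone sketch of the same attachment scheme, using the Python 2-era email modules this file already relies on (attach_blob and the generic octet-stream type are illustrative, not part of the extension):

    import email.MIMEMultipart, email.MIMEBase, email.Encoders

    def attach_blob(blob, filename):
        # wrap an arbitrary byte string as a base64-encoded attachment,
        # mirroring what getbundlemsgs does with the bundle payload
        msg = email.MIMEMultipart.MIMEMultipart()
        part = email.MIMEBase.MIMEBase('application', 'octet-stream')
        part.set_payload(blob)
        part.add_header('Content-Disposition', 'attachment', filename=filename)
        email.Encoders.encode_base64(part)
        msg.attach(part)
        return msg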
354 354 sender = (opts.get('from') or ui.config('email', 'from') or
355 355 ui.config('patchbomb', 'from') or
356 356 prompt(ui, 'From', ui.username()))
357 357
358 358 # internal option used by pbranches
359 359 patches = opts.get('patches')
360 360 if patches:
361 361 msgs = getpatchmsgs(patches, opts.get('patchnames'))
362 362 elif opts.get('bundle'):
363 363 msgs = getbundlemsgs(getbundle(dest))
364 364 else:
365 365 msgs = getpatchmsgs(list(getpatches(revs)))
366 366
367 367 def getaddrs(opt, prpt, default = None):
368 368 addrs = opts.get(opt) or (ui.config('email', opt) or
369 369 ui.config('patchbomb', opt) or
370 370 prompt(ui, prpt, default)).split(',')
371 371 return [mail.addressencode(ui, a.strip(), _charsets, opts.get('test'))
372 372 for a in addrs if a.strip()]
373 373
374 374 to = getaddrs('to', 'To')
375 375 cc = getaddrs('cc', 'Cc', '')
376 376
377 377 bcc = opts.get('bcc') or (ui.config('email', 'bcc') or
378 378 ui.config('patchbomb', 'bcc') or '').split(',')
379 379 bcc = [mail.addressencode(ui, a.strip(), _charsets, opts.get('test'))
380 380 for a in bcc if a.strip()]
381 381
382 382 ui.write('\n')
383 383
384 384 parent = None
385 385
386 386 sender_addr = email.Utils.parseaddr(sender)[1]
387 387 sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
388 388 sendmail = None
389 389 for m, subj in msgs:
390 390 try:
391 391 m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
392 392 except TypeError:
393 393 m['Message-Id'] = genmsgid('patchbomb')
394 394 if parent:
395 395 m['In-Reply-To'] = parent
396 396 m['References'] = parent
397 397 else:
398 398 parent = m['Message-Id']
399 399 m['Date'] = util.datestr(start_time, "%a, %d %b %Y %H:%M:%S %1%2")
400 400
401 401 start_time = (start_time[0] + 1, start_time[1])
402 402 m['From'] = sender
403 403 m['To'] = ', '.join(to)
404 404 if cc:
405 405 m['Cc'] = ', '.join(cc)
406 406 if bcc:
407 407 m['Bcc'] = ', '.join(bcc)
408 408 if opts.get('test'):
409 409 ui.status(_('Displaying '), subj, ' ...\n')
410 410 ui.flush()
411 411 if 'PAGER' in os.environ:
412 412 fp = util.popen(os.environ['PAGER'], 'w')
413 413 else:
414 414 fp = ui
415 415 generator = email.Generator.Generator(fp, mangle_from_=False)
416 416 try:
417 417 generator.flatten(m, 0)
418 418 fp.write('\n')
419 419 except IOError, inst:
420 420 if inst.errno != errno.EPIPE:
421 421 raise
422 422 if fp is not ui:
423 423 fp.close()
424 424 elif opts.get('mbox'):
425 425 ui.status(_('Writing '), subj, ' ...\n')
426 426 fp = open(opts.get('mbox'), 'In-Reply-To' in m and 'ab+' or 'wb+')
427 427 generator = email.Generator.Generator(fp, mangle_from_=True)
428 428 date = util.datestr(start_time, '%a %b %d %H:%M:%S %Y')
429 429 fp.write('From %s %s\n' % (sender_addr, date))
430 430 generator.flatten(m, 0)
431 431 fp.write('\n\n')
432 432 fp.close()
433 433 else:
434 434 if not sendmail:
435 435 sendmail = mail.connect(ui)
436 436 ui.status(_('Sending '), subj, ' ...\n')
437 437 # Exim does not remove the Bcc field
438 438 del m['Bcc']
439 439 fp = cStringIO.StringIO()
440 440 generator = email.Generator.Generator(fp, mangle_from_=False)
441 441 generator.flatten(m, 0)
442 442 sendmail(sender, to + bcc + cc, fp.getvalue())
443 443
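The loop above threads the series: the first message becomes the root, every later message points back at it via In-Reply-To/References, and start_time is bumped by one second per message so mail clients sort the series in order. A stripped-down sketch of just the threading step (standalone; the message ids are hypothetical):

    import email.Message

    def thread_series(n):
        # first message is the thread root; all later ones reply to it
        msgs, parent = [], None
        for i in range(n):
            m = email.Message.Message()
            m['Message-Id'] = '<patch-%d@example.invalid>' % i
            if parent:
                m['In-Reply-To'] = parent
                m['References'] = parent
            else:
                parent = m['Message-Id']
            msgs.append(m)
        return msgs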
444 444 emailopts = [
445 445 ('a', 'attach', None, _('send patches as attachments')),
446 446 ('i', 'inline', None, _('send patches as inline attachments')),
447 447 ('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
448 448 ('c', 'cc', [], _('email addresses of copy recipients')),
449 449 ('d', 'diffstat', None, _('add diffstat output to messages')),
450 450 ('', 'date', '', _('use the given date as the sending date')),
451 451 ('', 'desc', '', _('use the given file as the series description')),
452 452 ('f', 'from', '', _('email address of sender')),
453 453 ('n', 'test', None, _('print messages that would be sent')),
454 454 ('m', 'mbox', '',
455 455 _('write messages to mbox file instead of sending them')),
456 456 ('s', 'subject', '',
457 457 _('subject of first message (intro or single patch)')),
458 458 ('t', 'to', [], _('email addresses of recipients')),
459 459 ]
460 460
461 461
462 462 cmdtable = {
463 463 "email":
464 464 (patchbomb,
465 465 [('g', 'git', None, _('use git extended diff format')),
466 466 ('', 'plain', None, _('omit hg patch header')),
467 467 ('o', 'outgoing', None,
468 468 _('send changes not found in the target repository')),
469 469 ('b', 'bundle', None,
470 470 _('send changes not in target as a binary bundle')),
471 471 ('r', 'rev', [], _('a revision to send')),
472 472 ('', 'force', None,
473 473 _('run even when remote repository is unrelated (with -b)')),
474 474 ('', 'base', [],
475 475 _('a base changeset to specify instead of a destination (with -b)')),
476 476 ('', 'intro', None,
477 477 _('send an introduction email for a single patch')),
478 478 ] + emailopts + commands.remoteopts,
479 479 _('hg email [OPTION]... [DEST]...'))
480 480 }
@@ -1,598 +1,598 b''
1 1 # Patch transplanting extension for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 '''patch transplanting tool
9 9
10 10 This extension allows you to transplant patches from another branch.
11 11
12 12 Transplanted patches are recorded in .hg/transplant/transplants, as a map
13 13 from a changeset hash to its hash in the source repository.
14 14 '''
15 15
16 16 from mercurial.i18n import _
17 17 import os, tempfile
18 18 from mercurial import bundlerepo, changegroup, cmdutil, hg, merge
19 19 from mercurial import patch, revlog, util, error
20 20
21 21 class transplantentry:
22 22 def __init__(self, lnode, rnode):
23 23 self.lnode = lnode
24 24 self.rnode = rnode
25 25
26 26 class transplants:
27 27 def __init__(self, path=None, transplantfile=None, opener=None):
28 28 self.path = path
29 29 self.transplantfile = transplantfile
30 30 self.opener = opener
31 31
32 32 if not opener:
33 33 self.opener = util.opener(self.path)
34 34 self.transplants = []
35 35 self.dirty = False
36 36 self.read()
37 37
38 38 def read(self):
39 39 abspath = os.path.join(self.path, self.transplantfile)
40 40 if self.transplantfile and os.path.exists(abspath):
41 41 for line in self.opener(self.transplantfile).read().splitlines():
42 42 lnode, rnode = map(revlog.bin, line.split(':'))
43 43 self.transplants.append(transplantentry(lnode, rnode))
44 44
45 45 def write(self):
46 46 if self.dirty and self.transplantfile:
47 47 if not os.path.isdir(self.path):
48 48 os.mkdir(self.path)
49 49 fp = self.opener(self.transplantfile, 'w')
50 50 for c in self.transplants:
51 51 l, r = map(revlog.hex, (c.lnode, c.rnode))
52 52 fp.write(l + ':' + r + '\n')
53 53 fp.close()
54 54 self.dirty = False
55 55
56 56 def get(self, rnode):
57 57 return [t for t in self.transplants if t.rnode == rnode]
58 58
59 59 def set(self, lnode, rnode):
60 60 self.transplants.append(transplantentry(lnode, rnode))
61 61 self.dirty = True
62 62
63 63 def remove(self, transplant):
64 64 del self.transplants[self.transplants.index(transplant)]
65 65 self.dirty = True
66 66
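For reference, the .hg/transplant/transplants file handled by this class is plain text, one pair of full 40-character hex node ids per line in local:remote order, e.g. (hashes shortened and hypothetical):

    3a28c0fb6a25...:9f3c47a8d1e0...
    77dd2b58c4f1...:0b64e2a9c355...

Each line records that the source changeset on the right was transplanted as the local changeset on the left.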
67 67 class transplanter:
68 68 def __init__(self, ui, repo):
69 69 self.ui = ui
70 70 self.path = repo.join('transplant')
71 71 self.opener = util.opener(self.path)
72 72 self.transplants = transplants(self.path, 'transplants',
73 73 opener=self.opener)
74 74
75 75 def applied(self, repo, node, parent):
76 76 '''returns True if a node is already an ancestor of parent
77 77 or has already been transplanted'''
78 78 if hasnode(repo, node):
79 79 if node in repo.changelog.reachable(parent, stop=node):
80 80 return True
81 81 for t in self.transplants.get(node):
82 82 # it might have been stripped
83 83 if not hasnode(repo, t.lnode):
84 84 self.transplants.remove(t)
85 85 return False
86 86 if t.lnode in repo.changelog.reachable(parent, stop=t.lnode):
87 87 return True
88 88 return False
89 89
90 90 def apply(self, repo, source, revmap, merges, opts={}):
91 91 '''apply the revisions in revmap one by one in revision order'''
92 92 revs = util.sort(revmap)
93 93 p1, p2 = repo.dirstate.parents()
94 94 pulls = []
95 95 diffopts = patch.diffopts(self.ui, opts)
96 96 diffopts.git = True
97 97
98 98 lock = wlock = None
99 99 try:
100 100 wlock = repo.wlock()
101 101 lock = repo.lock()
102 102 for rev in revs:
103 103 node = revmap[rev]
104 104 revstr = '%s:%s' % (rev, revlog.short(node))
105 105
106 106 if self.applied(repo, node, p1):
107 107 self.ui.warn(_('skipping already applied revision %s\n') %
108 108 revstr)
109 109 continue
110 110
111 111 parents = source.changelog.parents(node)
112 112 if not opts.get('filter'):
113 113 # If the changeset parent is the same as the
114 114 # wdir's parent, just pull it.
115 115 if parents[0] == p1:
116 116 pulls.append(node)
117 117 p1 = node
118 118 continue
119 119 if pulls:
120 120 if source != repo:
121 121 repo.pull(source, heads=pulls)
122 122 merge.update(repo, pulls[-1], False, False, None)
123 123 p1, p2 = repo.dirstate.parents()
124 124 pulls = []
125 125
126 126 domerge = False
127 127 if node in merges:
128 128 # pulling all the merge revisions at once would mean we
129 129 # could not transplant past the latest one when an
130 130 # earlier transplant fails.
131 131 domerge = True
132 132 if not hasnode(repo, node):
133 133 repo.pull(source, heads=[node])
134 134
135 135 if parents[1] != revlog.nullid:
136 136 self.ui.note(_('skipping merge changeset %s:%s\n')
137 137 % (rev, revlog.short(node)))
138 138 patchfile = None
139 139 else:
140 140 fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
141 141 fp = os.fdopen(fd, 'w')
142 142 gen = patch.diff(source, parents[0], node, opts=diffopts)
143 143 for chunk in gen:
144 144 fp.write(chunk)
145 145 fp.close()
146 146
147 147 del revmap[rev]
148 148 if patchfile or domerge:
149 149 try:
150 150 n = self.applyone(repo, node,
151 151 source.changelog.read(node),
152 152 patchfile, merge=domerge,
153 153 log=opts.get('log'),
154 154 filter=opts.get('filter'))
155 155 if n and domerge:
156 156 self.ui.status(_('%s merged at %s\n') % (revstr,
157 157 revlog.short(n)))
158 158 elif n:
159 159 self.ui.status(_('%s transplanted to %s\n')
160 160 % (revlog.short(node),
161 161 revlog.short(n)))
162 162 finally:
163 163 if patchfile:
164 164 os.unlink(patchfile)
165 165 if pulls:
166 166 repo.pull(source, heads=pulls)
167 167 merge.update(repo, pulls[-1], False, False, None)
168 168 finally:
169 169 self.saveseries(revmap, merges)
170 170 self.transplants.write()
171 171 del lock, wlock
172 172
173 173 def filter(self, filter, changelog, patchfile):
174 174 '''arbitrarily rewrite changeset before applying it'''
175 175
176 176 self.ui.status(_('filtering %s\n') % patchfile)
177 177 user, date, msg = (changelog[1], changelog[2], changelog[4])
178 178
179 179 fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
180 180 fp = os.fdopen(fd, 'w')
181 181 fp.write("# HG changeset patch\n")
182 182 fp.write("# User %s\n" % user)
183 183 fp.write("# Date %d %d\n" % date)
184 184 fp.write(changelog[4])
185 185 fp.close()
186 186
187 187 try:
188 188 util.system('%s %s %s' % (filter, util.shellquote(headerfile),
189 189 util.shellquote(patchfile)),
190 190 environ={'HGUSER': changelog[1]},
191 191 onerr=util.Abort, errprefix=_('filter failed'))
192 192 user, date, msg = self.parselog(file(headerfile))[1:4]
193 193 finally:
194 194 os.unlink(headerfile)
195 195
196 196 return (user, date, msg)
197 197
198 198 def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
199 199 filter=None):
200 200 '''apply the patch in patchfile to the repository as a transplant'''
201 201 (manifest, user, (time, timezone), files, message) = cl[:5]
202 202 date = "%d %d" % (time, timezone)
203 203 extra = {'transplant_source': node}
204 204 if filter:
205 205 (user, date, message) = self.filter(filter, cl, patchfile)
206 206
207 207 if log:
208 208 message += '\n(transplanted from %s)' % revlog.hex(node)
209 209
210 210 self.ui.status(_('applying %s\n') % revlog.short(node))
211 211 self.ui.note('%s %s\n%s\n' % (user, date, message))
212 212
213 213 if not patchfile and not merge:
214 214 raise util.Abort(_('can only omit patchfile if merging'))
215 215 if patchfile:
216 216 try:
217 217 files = {}
218 218 try:
219 fuzz = patch.patch(patchfile, self.ui, cwd=repo.root,
220 files=files)
219 patch.patch(patchfile, self.ui, cwd=repo.root,
220 files=files)
221 221 if not files:
222 222 self.ui.warn(_('%s: empty changeset')
223 223 % revlog.hex(node))
224 224 return None
225 225 finally:
226 226 files = patch.updatedir(self.ui, repo, files)
227 227 except Exception, inst:
228 228 if filter:
229 229 os.unlink(patchfile)
230 230 seriespath = os.path.join(self.path, 'series')
231 231 if os.path.exists(seriespath):
232 232 os.unlink(seriespath)
233 233 p1 = repo.dirstate.parents()[0]
234 234 p2 = node
235 235 self.log(user, date, message, p1, p2, merge=merge)
236 236 self.ui.write(str(inst) + '\n')
237 237 raise util.Abort(_('Fix up the merge and run '
238 238 'hg transplant --continue'))
239 239 else:
240 240 files = None
241 241 if merge:
242 242 p1, p2 = repo.dirstate.parents()
243 243 repo.dirstate.setparents(p1, node)
244 244
245 245 n = repo.commit(files, message, user, date, extra=extra)
246 246 if not merge:
247 247 self.transplants.set(n, node)
248 248
249 249 return n
250 250
251 251 def resume(self, repo, source, opts=None):
252 252 '''recover last transaction and apply remaining changesets'''
253 253 if os.path.exists(os.path.join(self.path, 'journal')):
254 254 n, node = self.recover(repo)
255 255 self.ui.status(_('%s transplanted as %s\n') % (revlog.short(node),
256 256 revlog.short(n)))
257 257 seriespath = os.path.join(self.path, 'series')
258 258 if not os.path.exists(seriespath):
259 259 self.transplants.write()
260 260 return
261 261 nodes, merges = self.readseries()
262 262 revmap = {}
263 263 for n in nodes:
264 264 revmap[source.changelog.rev(n)] = n
265 265 os.unlink(seriespath)
266 266
267 267 self.apply(repo, source, revmap, merges, opts)
268 268
269 269 def recover(self, repo):
270 270 '''commit working directory using journal metadata'''
271 271 node, user, date, message, parents = self.readlog()
272 272 merge = len(parents) == 2
273 273
274 274 if not user or not date or not message or not parents[0]:
275 275 raise util.Abort(_('transplant log file is corrupt'))
276 276
277 277 extra = {'transplant_source': node}
278 278 wlock = repo.wlock()
279 279 try:
280 280 p1, p2 = repo.dirstate.parents()
281 281 if p1 != parents[0]:
282 282 raise util.Abort(
283 283 _('working dir not at transplant parent %s') %
284 284 revlog.hex(parents[0]))
285 285 if merge:
286 286 repo.dirstate.setparents(p1, parents[1])
287 287 n = repo.commit(None, message, user, date, extra=extra)
288 288 if not n:
289 289 raise util.Abort(_('commit failed'))
290 290 if not merge:
291 291 self.transplants.set(n, node)
292 292 self.unlog()
293 293
294 294 return n, node
295 295 finally:
296 296 del wlock
297 297
298 298 def readseries(self):
299 299 nodes = []
300 300 merges = []
301 301 cur = nodes
302 302 for line in self.opener('series').read().splitlines():
303 303 if line.startswith('# Merges'):
304 304 cur = merges
305 305 continue
306 306 cur.append(revlog.bin(line))
307 307
308 308 return (nodes, merges)
309 309
310 310 def saveseries(self, revmap, merges):
311 311 if not revmap:
312 312 return
313 313
314 314 if not os.path.isdir(self.path):
315 315 os.mkdir(self.path)
316 316 series = self.opener('series', 'w')
317 317 for rev in util.sort(revmap):
318 318 series.write(revlog.hex(revmap[rev]) + '\n')
319 319 if merges:
320 320 series.write('# Merges\n')
321 321 for m in merges:
322 322 series.write(revlog.hex(m) + '\n')
323 323 series.close()
324 324
325 325 def parselog(self, fp):
326 326 parents = []
327 327 message = []
328 328 node = revlog.nullid
329 329 inmsg = False
330 330 for line in fp.read().splitlines():
331 331 if inmsg:
332 332 message.append(line)
333 333 elif line.startswith('# User '):
334 334 user = line[7:]
335 335 elif line.startswith('# Date '):
336 336 date = line[7:]
337 337 elif line.startswith('# Node ID '):
338 338 node = revlog.bin(line[10:])
339 339 elif line.startswith('# Parent '):
340 340 parents.append(revlog.bin(line[9:]))
341 341 elif not line.startswith('#'):
342 342 inmsg = True
343 343 message.append(line)
344 344 return (node, user, date, '\n'.join(message), parents)
345 345
346 346 def log(self, user, date, message, p1, p2, merge=False):
347 347 '''journal changelog metadata for later recovery'''
348 348
349 349 if not os.path.isdir(self.path):
350 350 os.mkdir(self.path)
351 351 fp = self.opener('journal', 'w')
352 352 fp.write('# User %s\n' % user)
353 353 fp.write('# Date %s\n' % date)
354 354 fp.write('# Node ID %s\n' % revlog.hex(p2))
355 355 fp.write('# Parent ' + revlog.hex(p1) + '\n')
356 356 if merge:
357 357 fp.write('# Parent ' + revlog.hex(p2) + '\n')
358 358 fp.write(message.rstrip() + '\n')
359 359 fp.close()
360 360
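Concretely, for a non-merge transplant the journal written by log() looks like this (hashes shortened and hypothetical), and parselog() above reads exactly this shape back:

    # User alice <alice@example.com>
    # Date 1190000000 0
    # Node ID 9f3c47a8d1e0...
    # Parent 3a28c0fb6a25...
    fix off-by-one in frobnicator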
361 361 def readlog(self):
362 362 return self.parselog(self.opener('journal'))
363 363
364 364 def unlog(self):
365 365 '''remove changelog journal'''
366 366 absdst = os.path.join(self.path, 'journal')
367 367 if os.path.exists(absdst):
368 368 os.unlink(absdst)
369 369
370 370 def transplantfilter(self, repo, source, root):
371 371 def matchfn(node):
372 372 if self.applied(repo, node, root):
373 373 return False
374 374 if source.changelog.parents(node)[1] != revlog.nullid:
375 375 return False
376 376 extra = source.changelog.read(node)[5]
377 377 cnode = extra.get('transplant_source')
378 378 if cnode and self.applied(repo, cnode, root):
379 379 return False
380 380 return True
381 381
382 382 return matchfn
383 383
384 384 def hasnode(repo, node):
385 385 try:
386 386 return repo.changelog.rev(node) != None
387 387 except error.RevlogError:
388 388 return False
389 389
390 390 def browserevs(ui, repo, nodes, opts):
391 391 '''interactively transplant changesets'''
392 392 def browsehelp(ui):
393 393 ui.write('y: transplant this changeset\n'
394 394 'n: skip this changeset\n'
395 395 'm: merge at this changeset\n'
396 396 'p: show patch\n'
397 397 'c: commit selected changesets\n'
398 398 'q: cancel transplant\n'
399 399 '?: show this help\n')
400 400
401 401 displayer = cmdutil.show_changeset(ui, repo, opts)
402 402 transplants = []
403 403 merges = []
404 404 for node in nodes:
405 405 displayer.show(repo[node])
406 406 action = None
407 407 while not action:
408 408 action = ui.prompt(_('apply changeset? [ynmpcq?]:'))
409 409 if action == '?':
410 410 browsehelp(ui)
411 411 action = None
412 412 elif action == 'p':
413 413 parent = repo.changelog.parents(node)[0]
414 414 for chunk in patch.diff(repo, parent, node):
415 415 repo.ui.write(chunk)
416 416 action = None
417 417 elif action not in ('y', 'n', 'm', 'c', 'q'):
418 418 ui.write('no such option\n')
419 419 action = None
420 420 if action == 'y':
421 421 transplants.append(node)
422 422 elif action == 'm':
423 423 merges.append(node)
424 424 elif action == 'c':
425 425 break
426 426 elif action == 'q':
427 427 transplants = ()
428 428 merges = ()
429 429 break
430 430 return (transplants, merges)
431 431
432 432 def transplant(ui, repo, *revs, **opts):
433 433 '''transplant changesets from another branch
434 434
435 435 Selected changesets will be applied on top of the current working
436 436 directory with the log of the original changeset. If --log is
437 437 specified, log messages will have a comment appended of the form:
438 438
439 439 (transplanted from CHANGESETHASH)
440 440
441 441 You can rewrite the changelog message with the --filter option.
442 442 Its argument will be invoked with the current changelog message
443 443 as $1 and the patch as $2.
444 444
445 445 If --source is specified, selects changesets from the named
446 446 repository. If --branch is specified, selects changesets from the
447 447 branch holding the named revision, up to that revision. If --all
448 448 is specified, all changesets on the branch will be transplanted,
449 449 otherwise you will be prompted to select the changesets you want.
450 450
451 451 hg transplant --branch REVISION --all will rebase the selected branch
452 452 (up to the named revision) onto your current working directory.
453 453
454 454 You can optionally mark selected transplanted changesets as
455 455 merge changesets. You will not be prompted to transplant any
456 456 ancestors of a merged transplant, and you can merge descendants
457 457 of them normally instead of transplanting them.
458 458
459 459 If no merges or revisions are provided, hg transplant will start
460 460 an interactive changeset browser.
461 461
462 462 If a changeset application fails, you can fix the merge by hand and
463 463 then resume where you left off by calling hg transplant --continue.
464 464 '''
465 465 def getremotechanges(repo, url):
466 466 sourcerepo = ui.expandpath(url)
467 467 source = hg.repository(ui, sourcerepo)
468 468 common, incoming, rheads = repo.findcommonincoming(source, force=True)
469 469 if not incoming:
470 470 return (source, None, None)
471 471
472 472 bundle = None
473 473 if not source.local():
474 474 if source.capable('changegroupsubset'):
475 475 cg = source.changegroupsubset(incoming, rheads, 'incoming')
476 476 else:
477 477 cg = source.changegroup(incoming, 'incoming')
478 478 bundle = changegroup.writebundle(cg, None, 'HG10UN')
479 479 source = bundlerepo.bundlerepository(ui, repo.root, bundle)
480 480
481 481 return (source, incoming, bundle)
482 482
483 483 def incwalk(repo, incoming, branches, match=util.always):
484 484 if not branches:
485 485 branches=None
486 486 for node in repo.changelog.nodesbetween(incoming, branches)[0]:
487 487 if match(node):
488 488 yield node
489 489
490 490 def transplantwalk(repo, root, branches, match=util.always):
491 491 if not branches:
492 492 branches = repo.heads()
493 493 ancestors = []
494 494 for branch in branches:
495 495 ancestors.append(repo.changelog.ancestor(root, branch))
496 496 for node in repo.changelog.nodesbetween(ancestors, branches)[0]:
497 497 if match(node):
498 498 yield node
499 499
500 500 def checkopts(opts, revs):
501 501 if opts.get('continue'):
502 502 if filter(lambda opt: opts.get(opt), ('branch', 'all', 'merge')):
503 503 raise util.Abort(_('--continue is incompatible with '
504 504 'branch, all or merge'))
505 505 return
506 506 if not (opts.get('source') or revs or
507 507 opts.get('merge') or opts.get('branch')):
508 508 raise util.Abort(_('no source URL, branch tag or revision '
509 509 'list provided'))
510 510 if opts.get('all'):
511 511 if not opts.get('branch'):
512 512 raise util.Abort(_('--all requires a branch revision'))
513 513 if revs:
514 514 raise util.Abort(_('--all is incompatible with a '
515 515 'revision list'))
516 516
517 517 checkopts(opts, revs)
518 518
519 519 if not opts.get('log'):
520 520 opts['log'] = ui.config('transplant', 'log')
521 521 if not opts.get('filter'):
522 522 opts['filter'] = ui.config('transplant', 'filter')
523 523
524 524 tp = transplanter(ui, repo)
525 525
526 526 p1, p2 = repo.dirstate.parents()
527 527 if p1 == revlog.nullid:
528 528 raise util.Abort(_('no revision checked out'))
529 529 if not opts.get('continue'):
530 530 if p2 != revlog.nullid:
531 531 raise util.Abort(_('outstanding uncommitted merges'))
532 532 m, a, r, d = repo.status()[:4]
533 533 if m or a or r or d:
534 534 raise util.Abort(_('outstanding local changes'))
535 535
536 536 bundle = None
537 537 source = opts.get('source')
538 538 if source:
539 539 (source, incoming, bundle) = getremotechanges(repo, source)
540 540 else:
541 541 source = repo
542 542
543 543 try:
544 544 if opts.get('continue'):
545 545 tp.resume(repo, source, opts)
546 546 return
547 547
548 548 tf=tp.transplantfilter(repo, source, p1)
549 549 if opts.get('prune'):
550 550 prune = [source.lookup(r)
551 551 for r in cmdutil.revrange(source, opts.get('prune'))]
552 552 matchfn = lambda x: tf(x) and x not in prune
553 553 else:
554 554 matchfn = tf
555 555 branches = map(source.lookup, opts.get('branch', ()))
556 556 merges = map(source.lookup, opts.get('merge', ()))
557 557 revmap = {}
558 558 if revs:
559 559 for r in cmdutil.revrange(source, revs):
560 560 revmap[int(r)] = source.lookup(r)
561 561 elif opts.get('all') or not merges:
562 562 if source != repo:
563 563 alltransplants = incwalk(source, incoming, branches,
564 564 match=matchfn)
565 565 else:
566 566 alltransplants = transplantwalk(source, p1, branches,
567 567 match=matchfn)
568 568 if opts.get('all'):
569 569 revs = alltransplants
570 570 else:
571 571 revs, newmerges = browserevs(ui, source, alltransplants, opts)
572 572 merges.extend(newmerges)
573 573 for r in revs:
574 574 revmap[source.changelog.rev(r)] = r
575 575 for r in merges:
576 576 revmap[source.changelog.rev(r)] = r
577 577
578 578 tp.apply(repo, source, revmap, merges, opts)
579 579 finally:
580 580 if bundle:
581 581 source.close()
582 582 os.unlink(bundle)
583 583
584 584 cmdtable = {
585 585 "transplant":
586 586 (transplant,
587 587 [('s', 'source', '', _('pull patches from REPOSITORY')),
588 588 ('b', 'branch', [], _('pull patches from branch BRANCH')),
589 589 ('a', 'all', None, _('pull all changesets up to BRANCH')),
590 590 ('p', 'prune', [], _('skip over REV')),
591 591 ('m', 'merge', [], _('merge at REV')),
592 592 ('', 'log', None, _('append transplant info to log message')),
593 593 ('c', 'continue', None, _('continue last transplant session '
594 594 'after repair')),
595 595 ('', 'filter', '', _('filter changesets through FILTER'))],
596 596 _('hg transplant [-s REPOSITORY] [-b BRANCH [-a]] [-p REV] '
597 597 '[-m REV] [REV]...'))
598 598 }
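The --filter hook documented in the transplant docstring receives two file names: $1 is a temporary file holding the '# HG changeset patch' header plus the commit message (written by filter() above), and $2 is the patch itself. A hypothetical filter that tags every transplanted message might look like:

    #!/usr/bin/env python
    # hypothetical transplant --filter script: rewrite the message file ($1)
    # in place; the patch ($2) is available but left untouched here
    import sys

    headerfile = sys.argv[1]
    text = open(headerfile).read()
    open(headerfile, 'w').write(text.rstrip() + '\n\n(rewritten by filter)\n')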
@@ -1,1571 +1,1570 b''
1 1 """ Multicast DNS Service Discovery for Python, v0.12
2 2 Copyright (C) 2003, Paul Scott-Murphy
3 3
4 4 This module provides a framework for the use of DNS Service Discovery
5 5 using IP multicast. It has been tested against the JRendezvous
6 6 implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
7 7 and against the mDNSResponder from Mac OS X 10.3.8.
8 8
9 9 This library is free software; you can redistribute it and/or
10 10 modify it under the terms of the GNU Lesser General Public
11 11 License as published by the Free Software Foundation; either
12 12 version 2.1 of the License, or (at your option) any later version.
13 13
14 14 This library is distributed in the hope that it will be useful,
15 15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 17 Lesser General Public License for more details.
18 18
19 19 You should have received a copy of the GNU Lesser General Public
20 20 License along with this library; if not, write to the Free Software
21 21 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 22
23 23 """
24 24
25 25 """0.12 update - allow selection of binding interface
26 26 typo fix - Thanks A. M. Kuchlingi
27 27 removed all use of word 'Rendezvous' - this is an API change"""
28 28
29 29 """0.11 update - correction to comments for addListener method
30 30 support for new record types seen from OS X
31 31 - IPv6 address
32 32 - hostinfo
33 33 ignore unknown DNS record types
34 34 fixes to name decoding
35 35 works alongside other processes using port 5353 (e.g. on Mac OS X)
36 36 tested against Mac OS X 10.3.2's mDNSResponder
37 37 corrections to removal of list entries for service browser"""
38 38
39 39 """0.10 update - Jonathon Paisley contributed these corrections:
40 40 always multicast replies, even when query is unicast
41 41 correct a pointer encoding problem
42 42 can now write records in any order
43 43 traceback shown on failure
44 44 better TXT record parsing
45 45 server is now separate from name
46 46 can cancel a service browser
47 47
48 48 modified some unit tests to accommodate these changes"""
49 49
50 50 """0.09 update - remove all records on service unregistration
51 51 fix DOS security problem with readName"""
52 52
53 53 """0.08 update - changed licensing to LGPL"""
54 54
55 55 """0.07 update - faster shutdown on engine
56 56 pointer encoding of outgoing names
57 57 ServiceBrowser now works
58 58 new unit tests"""
59 59
60 60 """0.06 update - small improvements with unit tests
61 61 added defined exception types
62 62 new style objects
63 63 fixed hostname/interface problem
64 64 fixed socket timeout problem
65 65 fixed addServiceListener() typo bug
66 66 using select() for socket reads
67 67 tested on Debian unstable with Python 2.2.2"""
68 68
69 69 """0.05 update - ensure case insensitivty on domain names
70 70 support for unicast DNS queries"""
71 71
72 72 """0.04 update - added some unit tests
73 73 added __ne__ adjuncts where required
74 74 ensure names end in '.local.'
75 75 timeout on receiving socket for clean shutdown"""
76 76
77 77 __author__ = "Paul Scott-Murphy"
78 78 __email__ = "paul at scott dash murphy dot com"
79 79 __version__ = "0.12"
80 80
81 81 import string
82 82 import time
83 83 import struct
84 84 import socket
85 85 import threading
86 86 import select
87 87 import traceback
88 88
89 89 __all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
90 90
91 91 # hook for threads
92 92
93 93 globals()['_GLOBAL_DONE'] = 0
94 94
95 95 # Some timing constants
96 96
97 97 _UNREGISTER_TIME = 125
98 98 _CHECK_TIME = 175
99 99 _REGISTER_TIME = 225
100 100 _LISTENER_TIME = 200
101 101 _BROWSER_TIME = 500
102 102
103 103 # Some DNS constants
104 104
105 105 _MDNS_ADDR = '224.0.0.251'
106 106 _MDNS_PORT = 5353
107 107 _DNS_PORT = 53
108 108 _DNS_TTL = 60 * 60 # one hour default TTL
109 109
110 110 _MAX_MSG_TYPICAL = 1460 # unused
111 111 _MAX_MSG_ABSOLUTE = 8972
112 112
113 113 _FLAGS_QR_MASK = 0x8000 # query response mask
114 114 _FLAGS_QR_QUERY = 0x0000 # query
115 115 _FLAGS_QR_RESPONSE = 0x8000 # response
116 116
117 117 _FLAGS_AA = 0x0400 # Authoritative answer
118 118 _FLAGS_TC = 0x0200 # Truncated
119 119 _FLAGS_RD = 0x0100 # Recursion desired
120 120 _FLAGS_RA = 0x8000 # Recursion available
121 121
122 122 _FLAGS_Z = 0x0040 # Zero
123 123 _FLAGS_AD = 0x0020 # Authentic data
124 124 _FLAGS_CD = 0x0010 # Checking disabled
125 125
126 126 _CLASS_IN = 1
127 127 _CLASS_CS = 2
128 128 _CLASS_CH = 3
129 129 _CLASS_HS = 4
130 130 _CLASS_NONE = 254
131 131 _CLASS_ANY = 255
132 132 _CLASS_MASK = 0x7FFF
133 133 _CLASS_UNIQUE = 0x8000
134 134
135 135 _TYPE_A = 1
136 136 _TYPE_NS = 2
137 137 _TYPE_MD = 3
138 138 _TYPE_MF = 4
139 139 _TYPE_CNAME = 5
140 140 _TYPE_SOA = 6
141 141 _TYPE_MB = 7
142 142 _TYPE_MG = 8
143 143 _TYPE_MR = 9
144 144 _TYPE_NULL = 10
145 145 _TYPE_WKS = 11
146 146 _TYPE_PTR = 12
147 147 _TYPE_HINFO = 13
148 148 _TYPE_MINFO = 14
149 149 _TYPE_MX = 15
150 150 _TYPE_TXT = 16
151 151 _TYPE_AAAA = 28
152 152 _TYPE_SRV = 33
153 153 _TYPE_ANY = 255
154 154
155 155 # Mapping constants to names
156 156
157 157 _CLASSES = { _CLASS_IN : "in",
158 158 _CLASS_CS : "cs",
159 159 _CLASS_CH : "ch",
160 160 _CLASS_HS : "hs",
161 161 _CLASS_NONE : "none",
162 162 _CLASS_ANY : "any" }
163 163
164 164 _TYPES = { _TYPE_A : "a",
165 165 _TYPE_NS : "ns",
166 166 _TYPE_MD : "md",
167 167 _TYPE_MF : "mf",
168 168 _TYPE_CNAME : "cname",
169 169 _TYPE_SOA : "soa",
170 170 _TYPE_MB : "mb",
171 171 _TYPE_MG : "mg",
172 172 _TYPE_MR : "mr",
173 173 _TYPE_NULL : "null",
174 174 _TYPE_WKS : "wks",
175 175 _TYPE_PTR : "ptr",
176 176 _TYPE_HINFO : "hinfo",
177 177 _TYPE_MINFO : "minfo",
178 178 _TYPE_MX : "mx",
179 179 _TYPE_TXT : "txt",
180 180 _TYPE_AAAA : "quada",
181 181 _TYPE_SRV : "srv",
182 182 _TYPE_ANY : "any" }
183 183
184 184 # utility functions
185 185
186 186 def currentTimeMillis():
187 187 """Current system time in milliseconds"""
188 188 return time.time() * 1000
189 189
190 190 # Exceptions
191 191
192 192 class NonLocalNameException(Exception):
193 193 pass
194 194
195 195 class NonUniqueNameException(Exception):
196 196 pass
197 197
198 198 class NamePartTooLongException(Exception):
199 199 pass
200 200
201 201 class AbstractMethodException(Exception):
202 202 pass
203 203
204 204 class BadTypeInNameException(Exception):
205 205 pass
206 206
207 207 # implementation classes
208 208
209 209 class DNSEntry(object):
210 210 """A DNS entry"""
211 211
212 212 def __init__(self, name, type, clazz):
213 213 self.key = string.lower(name)
214 214 self.name = name
215 215 self.type = type
216 216 self.clazz = clazz & _CLASS_MASK
217 217 self.unique = (clazz & _CLASS_UNIQUE) != 0
218 218
219 219 def __eq__(self, other):
220 220 """Equality test on name, type, and class"""
221 221 if isinstance(other, DNSEntry):
222 222 return self.name == other.name and self.type == other.type and self.clazz == other.clazz
223 223 return 0
224 224
225 225 def __ne__(self, other):
226 226 """Non-equality test"""
227 227 return not self.__eq__(other)
228 228
229 229 def getClazz(self, clazz):
230 230 """Class accessor"""
231 231 try:
232 232 return _CLASSES[clazz]
233 233 except:
234 234 return "?(%s)" % (clazz)
235 235
236 236 def getType(self, type):
237 237 """Type accessor"""
238 238 try:
239 239 return _TYPES[type]
240 240 except:
241 241 return "?(%s)" % (type)
242 242
243 243 def toString(self, hdr, other):
244 244 """String representation with additional information"""
245 245 result = "%s[%s,%s" % (hdr, self.getType(self.type), self.getClazz(self.clazz))
246 246 if self.unique:
247 247 result += "-unique,"
248 248 else:
249 249 result += ","
250 250 result += self.name
251 251 if other is not None:
252 252 result += ",%s]" % (other)
253 253 else:
254 254 result += "]"
255 255 return result
256 256
257 257 class DNSQuestion(DNSEntry):
258 258 """A DNS question entry"""
259 259
260 260 def __init__(self, name, type, clazz):
261 261 if not name.endswith(".local."):
262 262 raise NonLocalNameException
263 263 DNSEntry.__init__(self, name, type, clazz)
264 264
265 265 def answeredBy(self, rec):
266 266 """Returns true if the question is answered by the record"""
267 267 return self.clazz == rec.clazz and (self.type == rec.type or self.type == _TYPE_ANY) and self.name == rec.name
268 268
269 269 def __repr__(self):
270 270 """String representation"""
271 271 return DNSEntry.toString(self, "question", None)
272 272
273 273
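answeredBy() matches on class, on type (with _TYPE_ANY acting as a wildcard) and on name; a quick illustration using the record classes defined below:

    q = DNSQuestion('host.local.', _TYPE_ANY, _CLASS_IN)
    a = DNSAddress('host.local.', _TYPE_A, _CLASS_IN, _DNS_TTL, '\x7f\x00\x00\x01')
    assert q.answeredBy(a)   # an ANY question accepts an A record of the same name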
274 274 class DNSRecord(DNSEntry):
275 275 """A DNS record - like a DNS entry, but has a TTL"""
276 276
277 277 def __init__(self, name, type, clazz, ttl):
278 278 DNSEntry.__init__(self, name, type, clazz)
279 279 self.ttl = ttl
280 280 self.created = currentTimeMillis()
281 281
282 282 def __eq__(self, other):
283 283 """Tests equality as per DNSRecord"""
284 284 if isinstance(other, DNSRecord):
285 285 return DNSEntry.__eq__(self, other)
286 286 return 0
287 287
288 288 def suppressedBy(self, msg):
289 289 """Returns true if any answer in a message can suffice for the
290 290 information held in this record."""
291 291 for record in msg.answers:
292 292 if self.suppressedByAnswer(record):
293 293 return 1
294 294 return 0
295 295
296 296 def suppressedByAnswer(self, other):
297 297 """Returns true if another record has same name, type and class,
298 298 and if its TTL is at least half of this record's."""
299 299 if self == other and other.ttl > (self.ttl / 2):
300 300 return 1
301 301 return 0
302 302
303 303 def getExpirationTime(self, percent):
304 304 """Returns the time at which this record will have expired
305 305 by a certain percentage."""
306 306 return self.created + (percent * self.ttl * 10)
307 307
308 308 def getRemainingTTL(self, now):
309 309 """Returns the remaining TTL in seconds."""
310 310 return max(0, (self.getExpirationTime(100) - now) / 1000)
311 311
312 312 def isExpired(self, now):
313 313 """Returns true if this record has expired."""
314 314 return self.getExpirationTime(100) <= now
315 315
316 316 def isStale(self, now):
317 317 """Returns true if this record is at least half way expired."""
318 318 return self.getExpirationTime(50) <= now
319 319
320 320 def resetTTL(self, other):
321 321 """Sets this record's TTL and created time to that of
322 322 another record."""
323 323 self.created = other.created
324 324 self.ttl = other.ttl
325 325
326 326 def write(self, out):
327 327 """Abstract method"""
328 328 raise AbstractMethodException
329 329
330 330 def toString(self, other):
331 331 """String representation with addtional information"""
332 332 arg = "%s/%s,%s" % (self.ttl, self.getRemainingTTL(currentTimeMillis()), other)
333 333 return DNSEntry.toString(self, "record", arg)
334 334
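Note the unit mix in DNSRecord: created is in milliseconds while ttl is in seconds, so getExpirationTime(percent) works out to created + ttl * 1000 * percent / 100. A quick check of the arithmetic:

    rec = DNSRecord('x.local.', _TYPE_A, _CLASS_IN, 60)
    rec.created = 0                              # pretend it was cached at t=0 ms
    assert rec.getExpirationTime(50) == 30000    # isStale() threshold: half the TTL
    assert rec.getExpirationTime(100) == 60000   # isExpired() threshold: full TTL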
335 335 class DNSAddress(DNSRecord):
336 336 """A DNS address record"""
337 337
338 338 def __init__(self, name, type, clazz, ttl, address):
339 339 DNSRecord.__init__(self, name, type, clazz, ttl)
340 340 self.address = address
341 341
342 342 def write(self, out):
343 343 """Used in constructing an outgoing packet"""
344 344 out.writeString(self.address, len(self.address))
345 345
346 346 def __eq__(self, other):
347 347 """Tests equality on address"""
348 348 if isinstance(other, DNSAddress):
349 349 return self.address == other.address
350 350 return 0
351 351
352 352 def __repr__(self):
353 353 """String representation"""
354 354 try:
355 355 return socket.inet_ntoa(self.address)
356 356 except:
357 357 return self.address
358 358
359 359 class DNSHinfo(DNSRecord):
360 360 """A DNS host information record"""
361 361
362 362 def __init__(self, name, type, clazz, ttl, cpu, os):
363 363 DNSRecord.__init__(self, name, type, clazz, ttl)
364 364 self.cpu = cpu
365 365 self.os = os
366 366
367 367 def write(self, out):
368 368 """Used in constructing an outgoing packet"""
369 369 out.writeString(self.cpu, len(self.cpu))
370 370 out.writeString(self.os, len(self.os))
371 371
372 372 def __eq__(self, other):
373 373 """Tests equality on cpu and os"""
374 374 if isinstance(other, DNSHinfo):
375 375 return self.cpu == other.cpu and self.os == other.os
376 376 return 0
377 377
378 378 def __repr__(self):
379 379 """String representation"""
380 380 return self.cpu + " " + self.os
381 381
382 382 class DNSPointer(DNSRecord):
383 383 """A DNS pointer record"""
384 384
385 385 def __init__(self, name, type, clazz, ttl, alias):
386 386 DNSRecord.__init__(self, name, type, clazz, ttl)
387 387 self.alias = alias
388 388
389 389 def write(self, out):
390 390 """Used in constructing an outgoing packet"""
391 391 out.writeName(self.alias)
392 392
393 393 def __eq__(self, other):
394 394 """Tests equality on alias"""
395 395 if isinstance(other, DNSPointer):
396 396 return self.alias == other.alias
397 397 return 0
398 398
399 399 def __repr__(self):
400 400 """String representation"""
401 401 return self.toString(self.alias)
402 402
403 403 class DNSText(DNSRecord):
404 404 """A DNS text record"""
405 405
406 406 def __init__(self, name, type, clazz, ttl, text):
407 407 DNSRecord.__init__(self, name, type, clazz, ttl)
408 408 self.text = text
409 409
410 410 def write(self, out):
411 411 """Used in constructing an outgoing packet"""
412 412 out.writeString(self.text, len(self.text))
413 413
414 414 def __eq__(self, other):
415 415 """Tests equality on text"""
416 416 if isinstance(other, DNSText):
417 417 return self.text == other.text
418 418 return 0
419 419
420 420 def __repr__(self):
421 421 """String representation"""
422 422 if len(self.text) > 10:
423 423 return self.toString(self.text[:7] + "...")
424 424 else:
425 425 return self.toString(self.text)
426 426
427 427 class DNSService(DNSRecord):
428 428 """A DNS service record"""
429 429
430 430 def __init__(self, name, type, clazz, ttl, priority, weight, port, server):
431 431 DNSRecord.__init__(self, name, type, clazz, ttl)
432 432 self.priority = priority
433 433 self.weight = weight
434 434 self.port = port
435 435 self.server = server
436 436
437 437 def write(self, out):
438 438 """Used in constructing an outgoing packet"""
439 439 out.writeShort(self.priority)
440 440 out.writeShort(self.weight)
441 441 out.writeShort(self.port)
442 442 out.writeName(self.server)
443 443
444 444 def __eq__(self, other):
445 445 """Tests equality on priority, weight, port and server"""
446 446 if isinstance(other, DNSService):
447 447 return self.priority == other.priority and self.weight == other.weight and self.port == other.port and self.server == other.server
448 448 return 0
449 449
450 450 def __repr__(self):
451 451 """String representation"""
452 452 return self.toString("%s:%s" % (self.server, self.port))
453 453
454 454 class DNSIncoming(object):
455 455 """Object representation of an incoming DNS packet"""
456 456
457 457 def __init__(self, data):
458 458 """Constructor from string holding bytes of packet"""
459 459 self.offset = 0
460 460 self.data = data
461 461 self.questions = []
462 462 self.answers = []
463 463 self.numQuestions = 0
464 464 self.numAnswers = 0
465 465 self.numAuthorities = 0
466 466 self.numAdditionals = 0
467 467
468 468 self.readHeader()
469 469 self.readQuestions()
470 470 self.readOthers()
471 471
472 472 def readHeader(self):
473 473 """Reads header portion of packet"""
474 474 format = '!HHHHHH'
475 475 length = struct.calcsize(format)
476 476 info = struct.unpack(format, self.data[self.offset:self.offset+length])
477 477 self.offset += length
478 478
479 479 self.id = info[0]
480 480 self.flags = info[1]
481 481 self.numQuestions = info[2]
482 482 self.numAnswers = info[3]
483 483 self.numAuthorities = info[4]
484 484 self.numAdditionals = info[5]
485 485
486 486 def readQuestions(self):
487 487 """Reads questions section of packet"""
488 488 format = '!HH'
489 489 length = struct.calcsize(format)
490 490 for i in range(0, self.numQuestions):
491 491 name = self.readName()
492 492 info = struct.unpack(format, self.data[self.offset:self.offset+length])
493 493 self.offset += length
494 494
495 495 question = DNSQuestion(name, info[0], info[1])
496 496 self.questions.append(question)
497 497
498 498 def readInt(self):
499 499 """Reads an integer from the packet"""
500 500 format = '!I'
501 501 length = struct.calcsize(format)
502 502 info = struct.unpack(format, self.data[self.offset:self.offset+length])
503 503 self.offset += length
504 504 return info[0]
505 505
506 506 def readCharacterString(self):
507 507 """Reads a character string from the packet"""
508 508 length = ord(self.data[self.offset])
509 509 self.offset += 1
510 510 return self.readString(length)
511 511
512 512 def readString(self, len):
513 513 """Reads a string of a given length from the packet"""
514 514 format = '!' + str(len) + 's'
515 515 length = struct.calcsize(format)
516 516 info = struct.unpack(format, self.data[self.offset:self.offset+length])
517 517 self.offset += length
518 518 return info[0]
519 519
520 520 def readUnsignedShort(self):
521 521 """Reads an unsigned short from the packet"""
522 522 format = '!H'
523 523 length = struct.calcsize(format)
524 524 info = struct.unpack(format, self.data[self.offset:self.offset+length])
525 525 self.offset += length
526 526 return info[0]
527 527
528 528 def readOthers(self):
529 529 """Reads the answers, authorities and additionals section of the packet"""
530 530 format = '!HHiH'
531 531 length = struct.calcsize(format)
532 532 n = self.numAnswers + self.numAuthorities + self.numAdditionals
533 533 for i in range(0, n):
534 534 domain = self.readName()
535 535 info = struct.unpack(format, self.data[self.offset:self.offset+length])
536 536 self.offset += length
537 537
538 538 rec = None
539 539 if info[0] == _TYPE_A:
540 540 rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(4))
541 541 elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
542 542 rec = DNSPointer(domain, info[0], info[1], info[2], self.readName())
543 543 elif info[0] == _TYPE_TXT:
544 544 rec = DNSText(domain, info[0], info[1], info[2], self.readString(info[3]))
545 545 elif info[0] == _TYPE_SRV:
546 546 rec = DNSService(domain, info[0], info[1], info[2], self.readUnsignedShort(), self.readUnsignedShort(), self.readUnsignedShort(), self.readName())
547 547 elif info[0] == _TYPE_HINFO:
548 548 rec = DNSHinfo(domain, info[0], info[1], info[2], self.readCharacterString(), self.readCharacterString())
549 549 elif info[0] == _TYPE_AAAA:
550 550 rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(16))
551 551 else:
552 552 # Try to ignore types we don't know about
553 553 # this may mean the rest of the name is
554 554 # unable to be parsed, and may show errors
555 555 # so this is left for debugging. New types
556 556 # encountered need to be parsed properly.
557 557 #
558 558 #print "UNKNOWN TYPE = " + str(info[0])
559 559 #raise BadTypeInNameException
560 560 pass
561 561
562 562 if rec is not None:
563 563 self.answers.append(rec)
564 564
565 565 def isQuery(self):
566 566 """Returns true if this is a query"""
567 567 return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY
568 568
569 569 def isResponse(self):
570 570 """Returns true if this is a response"""
571 571 return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE
572 572
573 573 def readUTF(self, offset, len):
574 574 """Reads a UTF-8 string of a given length from the packet"""
575 575 result = self.data[offset:offset+len].decode('utf-8')
576 576 return result
577 577
578 578 def readName(self):
579 579 """Reads a domain name from the packet"""
580 580 result = ''
581 581 off = self.offset
582 582 next = -1
583 583 first = off
584 584
585 585 while 1:
586 586 len = ord(self.data[off])
587 587 off += 1
588 588 if len == 0:
589 589 break
590 590 t = len & 0xC0
591 591 if t == 0x00:
592 592 result = ''.join((result, self.readUTF(off, len) + '.'))
593 593 off += len
594 594 elif t == 0xC0:
595 595 if next < 0:
596 596 next = off + 1
597 597 off = ((len & 0x3F) << 8) | ord(self.data[off])
598 598 if off >= first:
599 599 raise Exception("Bad domain name (circular) at " + str(off))
600 600 first = off
601 601 else:
602 602 raise Exception("Bad domain name at " + str(off))
603 603
604 604 if next >= 0:
605 605 self.offset = next
606 606 else:
607 607 self.offset = off
608 608
609 609 return result
610 610
611 611
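readName() implements standard DNS name compression: labels are length-prefixed, and a byte with the top two bits set (0xC0) starts a two-byte pointer back to an earlier offset in the packet. A self-contained sketch of the decoder in the same Python 2 idiom (readname and the sample packet are illustrative only):

    # '\xc0\x00' is a pointer back to offset 0, where 'example.local' begins
    packet = '\x07example\x05local\x00' + '\xc0\x00'

    def readname(data, off):
        parts = []
        while 1:
            n = ord(data[off]); off += 1
            if n == 0:
                break
            if n & 0xC0 == 0xC0:                 # compression pointer
                ptr = ((n & 0x3F) << 8) | ord(data[off])
                return '.'.join(parts + [readname(data, ptr)[0]]), off + 1
            parts.append(data[off:off+n]); off += n
        return '.'.join(parts), off

    assert readname(packet, 15) == ('example.local', 17)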
612 612 class DNSOutgoing(object):
613 613 """Object representation of an outgoing packet"""
614 614
615 615 def __init__(self, flags, multicast = 1):
616 616 self.finished = 0
617 617 self.id = 0
618 618 self.multicast = multicast
619 619 self.flags = flags
620 620 self.names = {}
621 621 self.data = []
622 622 self.size = 12
623 623
624 624 self.questions = []
625 625 self.answers = []
626 626 self.authorities = []
627 627 self.additionals = []
628 628
629 629 def addQuestion(self, record):
630 630 """Adds a question"""
631 631 self.questions.append(record)
632 632
633 633 def addAnswer(self, inp, record):
634 634 """Adds an answer"""
635 635 if not record.suppressedBy(inp):
636 636 self.addAnswerAtTime(record, 0)
637 637
638 638 def addAnswerAtTime(self, record, now):
639 639 """Adds an answer if if does not expire by a certain time"""
640 640 if record is not None:
641 641 if now == 0 or not record.isExpired(now):
642 642 self.answers.append((record, now))
643 643
644 644 def addAuthorativeAnswer(self, record):
645 645 """Adds an authoritative answer"""
646 646 self.authorities.append(record)
647 647
648 648 def addAdditionalAnswer(self, record):
649 649 """Adds an additional answer"""
650 650 self.additionals.append(record)
651 651
652 652 def writeByte(self, value):
653 653 """Writes a single byte to the packet"""
654 654 format = '!c'
655 655 self.data.append(struct.pack(format, chr(value)))
656 656 self.size += 1
657 657
658 658 def insertShort(self, index, value):
659 659 """Inserts an unsigned short in a certain position in the packet"""
660 660 format = '!H'
661 661 self.data.insert(index, struct.pack(format, value))
662 662 self.size += 2
663 663
664 664 def writeShort(self, value):
665 665 """Writes an unsigned short to the packet"""
666 666 format = '!H'
667 667 self.data.append(struct.pack(format, value))
668 668 self.size += 2
669 669
670 670 def writeInt(self, value):
671 671 """Writes an unsigned integer to the packet"""
672 672 format = '!I'
673 673 self.data.append(struct.pack(format, int(value)))
674 674 self.size += 4
675 675
676 676 def writeString(self, value, length):
677 677 """Writes a string to the packet"""
678 678 format = '!' + str(length) + 's'
679 679 self.data.append(struct.pack(format, value))
680 680 self.size += length
681 681
682 682 def writeUTF(self, s):
683 683 """Writes a UTF-8 string of a given length to the packet"""
684 684 utfstr = s.encode('utf-8')
685 685 length = len(utfstr)
686 686 if length > 63: # DNS labels are at most 63 bytes
687 687 raise NamePartTooLongException
688 688 self.writeByte(length)
689 689 self.writeString(utfstr, length)
690 690
691 691 def writeName(self, name):
692 692 """Writes a domain name to the packet"""
693 693
694 694 try:
695 695 # Find existing instance of this name in packet
696 696 #
697 697 index = self.names[name]
698 698 except KeyError:
699 699 # No record of this name already, so write it
700 700 # out as normal, recording the location of the name
701 701 # for future pointers to it.
702 702 #
703 703 self.names[name] = self.size
704 704 parts = name.split('.')
705 705 if parts[-1] == '':
706 706 parts = parts[:-1]
707 707 for part in parts:
708 708 self.writeUTF(part)
709 709 self.writeByte(0)
710 710 return
711 711
712 712 # An index was found, so write a pointer to it
713 713 #
714 714 self.writeByte((index >> 8) | 0xC0)
715 715 self.writeByte(index)
716 716
717 717 def writeQuestion(self, question):
718 718 """Writes a question to the packet"""
719 719 self.writeName(question.name)
720 720 self.writeShort(question.type)
721 721 self.writeShort(question.clazz)
722 722
723 723 def writeRecord(self, record, now):
724 724 """Writes a record (answer, authoritative answer, additional) to
725 725 the packet"""
726 726 self.writeName(record.name)
727 727 self.writeShort(record.type)
728 728 if record.unique and self.multicast:
729 729 self.writeShort(record.clazz | _CLASS_UNIQUE)
730 730 else:
731 731 self.writeShort(record.clazz)
732 732 if now == 0:
733 733 self.writeInt(record.ttl)
734 734 else:
735 735 self.writeInt(record.getRemainingTTL(now))
736 736 index = len(self.data)
737 737 # Adjust size for the short we will write before this record
738 738 #
739 739 self.size += 2
740 740 record.write(self)
741 741 self.size -= 2
742 742
743 743 length = len(''.join(self.data[index:]))
744 744 self.insertShort(index, length) # Here is the short we adjusted for
745 745
746 746 def packet(self):
747 747 """Returns a string containing the packet's bytes
748 748
749 749 No further parts should be added to the packet once this
750 750 is done."""
751 751 if not self.finished:
752 752 self.finished = 1
753 753 for question in self.questions:
754 754 self.writeQuestion(question)
755 755 for answer, time in self.answers:
756 756 self.writeRecord(answer, time)
757 757 for authority in self.authorities:
758 758 self.writeRecord(authority, 0)
759 759 for additional in self.additionals:
760 760 self.writeRecord(additional, 0)
761 761
762 762 self.insertShort(0, len(self.additionals))
763 763 self.insertShort(0, len(self.authorities))
764 764 self.insertShort(0, len(self.answers))
765 765 self.insertShort(0, len(self.questions))
766 766 self.insertShort(0, self.flags)
767 767 if self.multicast:
768 768 self.insertShort(0, 0)
769 769 else:
770 770 self.insertShort(0, self.id)
771 771 return ''.join(self.data)
772 772
773 773
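A minimal usage sketch: build a PTR query for a service type and serialize it; the result is the kind of wire packet a browser sends to the multicast group defined at the top of the module:

    out = DNSOutgoing(_FLAGS_QR_QUERY)
    out.addQuestion(DNSQuestion('_http._tcp.local.', _TYPE_PTR, _CLASS_IN))
    wire = out.packet()
    # sock.sendto(wire, 0, (_MDNS_ADDR, _MDNS_PORT)) on a multicast UDP socket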
774 774 class DNSCache(object):
775 775 """A cache of DNS entries"""
776 776
777 777 def __init__(self):
778 778 self.cache = {}
779 779
780 780 def add(self, entry):
781 781 """Adds an entry"""
782 782 try:
783 783 list = self.cache[entry.key]
784 784 except:
785 785 list = self.cache[entry.key] = []
786 786 list.append(entry)
787 787
788 788 def remove(self, entry):
789 789 """Removes an entry"""
790 790 try:
791 791 list = self.cache[entry.key]
792 792 list.remove(entry)
793 793 except:
794 794 pass
795 795
796 796 def get(self, entry):
797 797 """Gets an entry by key. Will return None if there is no
798 798 matching entry."""
799 799 try:
800 800 list = self.cache[entry.key]
801 801 return list[list.index(entry)]
802 802 except:
803 803 return None
804 804
805 805 def getByDetails(self, name, type, clazz):
806 806 """Gets an entry by details. Will return None if there is
807 807 no matching entry."""
808 808 entry = DNSEntry(name, type, clazz)
809 809 return self.get(entry)
810 810
811 811 def entriesWithName(self, name):
812 812 """Returns a list of entries whose key matches the name."""
813 813 try:
814 814 return self.cache[name]
815 815 except:
816 816 return []
817 817
818 818 def entries(self):
819 819 """Returns a list of all entries"""
820 820 def add(x, y): return x+y
821 821 try:
822 822 return reduce(add, self.cache.values())
823 823 except:
824 824 return []
825 825
826 826
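The cache keys entries by lower-cased name and relies on the DNSEntry equality defined earlier, which is what makes lookups case-insensitive; a small sketch:

    cache = DNSCache()
    rec = DNSAddress('Host.local.', _TYPE_A, _CLASS_IN, _DNS_TTL, '\x7f\x00\x00\x01')
    cache.add(rec)
    assert cache.get(rec) is rec
    assert cache.entriesWithName('host.local.') == [rec]   # key was lower-cased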
827 827 class Engine(threading.Thread):
828 828 """An engine wraps read access to sockets, allowing objects that
829 829 need to receive data from sockets to be called back when the
830 830 sockets are ready.
831 831
832 832 A reader needs a handle_read() method, which is called when the socket
833 833 it is interested in is ready for reading.
834 834
835 835 Writers are not implemented here, because we only send short
836 836 packets.
837 837 """
838 838
839 839 def __init__(self, zeroconf):
840 840 threading.Thread.__init__(self)
841 841 self.zeroconf = zeroconf
842 842 self.readers = {} # maps socket to reader
843 843 self.timeout = 5
844 844 self.condition = threading.Condition()
845 845 self.start()
846 846
847 847 def run(self):
848 848 while not globals()['_GLOBAL_DONE']:
849 849 rs = self.getReaders()
850 850 if len(rs) == 0:
851 851 # No sockets to manage, but we wait for the timeout
852 852 # or addition of a socket
853 853 #
854 854 self.condition.acquire()
855 855 self.condition.wait(self.timeout)
856 856 self.condition.release()
857 857 else:
858 858 try:
859 859 rr, wr, er = select.select(rs, [], [], self.timeout)
860 860 for socket in rr:
861 861 try:
862 862 self.readers[socket].handle_read()
863 863 except:
864 864 traceback.print_exc()
865 865 except:
866 866 pass
867 867
868 868 def getReaders(self):
869 result = []
870 869 self.condition.acquire()
871 870 result = self.readers.keys()
872 871 self.condition.release()
873 872 return result
874 873
875 874 def addReader(self, reader, socket):
876 875 self.condition.acquire()
877 876 self.readers[socket] = reader
878 877 self.condition.notify()
879 878 self.condition.release()
880 879
881 880 def delReader(self, socket):
882 881 self.condition.acquire()
883 882 del(self.readers[socket])
884 883 self.condition.notify()
885 884 self.condition.release()
886 885
887 886 def notify(self):
888 887 self.condition.acquire()
889 888 self.condition.notify()
890 889 self.condition.release()
891 890
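The Engine above is the select()-dispatch pattern: one thread multiplexes
all sockets and calls each registered reader's handle_read() when its
socket becomes readable. A hedged standalone sketch of one iteration of
that loop (EchoReader and run_once are hypothetical, not module API):

import select, socket

class EchoReader(object):
    def __init__(self, sock):
        self.sock = sock
    def handle_read(self):
        # Called when self.sock is readable; echo the datagram back.
        data, addr = self.sock.recvfrom(512)
        self.sock.sendto(data, addr)

def run_once(readers, timeout=5):
    # readers maps socket -> reader, as in Engine.readers above.
    rr, wr, er = select.select(list(readers), [], [], timeout)
    for s in rr:
        readers[s].handle_read()

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('127.0.0.1', 0))
readers = {s: EchoReader(s)}
# run_once(readers)  # would block up to 5 seconds awaiting a datagram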
892 891 class Listener(object):
893 892 """A Listener is used by this module to listen on the multicast
894 893 group to which DNS messages are sent, allowing the implementation
895 894 to cache information as it arrives.
896 895
897 896 It requires registration with an Engine object in order to have
898 897 the handle_read() method called when a socket is available for reading."""
899 898
900 899 def __init__(self, zeroconf):
901 900 self.zeroconf = zeroconf
902 901 self.zeroconf.engine.addReader(self, self.zeroconf.socket)
903 902
904 903 def handle_read(self):
905 904 data, (addr, port) = self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE)
906 905 self.data = data
907 906 msg = DNSIncoming(data)
908 907 if msg.isQuery():
909 908 # Always multicast responses
910 909 #
911 910 if port == _MDNS_PORT:
912 911 self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
913 912 # If it's not a multicast query, reply via unicast
914 913 # and multicast
915 914 #
916 915 elif port == _DNS_PORT:
917 916 self.zeroconf.handleQuery(msg, addr, port)
918 917 self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
919 918 else:
920 919 self.zeroconf.handleResponse(msg)
921 920
922 921
923 922 class Reaper(threading.Thread):
924 923 """A Reaper is used by this module to remove cache entries that
925 924 have expired."""
926 925
927 926 def __init__(self, zeroconf):
928 927 threading.Thread.__init__(self)
929 928 self.zeroconf = zeroconf
930 929 self.start()
931 930
932 931 def run(self):
933 932 while 1:
934 933 self.zeroconf.wait(10 * 1000)
935 934 if globals()['_GLOBAL_DONE']:
936 935 return
937 936 now = currentTimeMillis()
938 937 for record in self.zeroconf.cache.entries():
939 938 if record.isExpired(now):
940 939 self.zeroconf.updateRecord(now, record)
941 940 self.zeroconf.cache.remove(record)
942 941
943 942
944 943 class ServiceBrowser(threading.Thread):
945 944 """Used to browse for a service of a specific type.
946 945
947 946 The listener object will have its addService() and
948 947 removeService() methods called when this browser
949 948 discovers changes in the services' availability."""
950 949
951 950 def __init__(self, zeroconf, type, listener):
952 951 """Creates a browser for a specific type"""
953 952 threading.Thread.__init__(self)
954 953 self.zeroconf = zeroconf
955 954 self.type = type
956 955 self.listener = listener
957 956 self.services = {}
958 957 self.nextTime = currentTimeMillis()
959 958 self.delay = _BROWSER_TIME
960 959 self.list = []
961 960
962 961 self.done = 0
963 962
964 963 self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
965 964 self.start()
966 965
967 966 def updateRecord(self, zeroconf, now, record):
968 967 """Callback invoked by Zeroconf when new information arrives.
969 968
970 969 Updates information required by browser in the Zeroconf cache."""
971 970 if record.type == _TYPE_PTR and record.name == self.type:
972 971 expired = record.isExpired(now)
973 972 try:
974 973 oldrecord = self.services[record.alias.lower()]
975 974 if not expired:
976 975 oldrecord.resetTTL(record)
977 976 else:
978 977 del(self.services[record.alias.lower()])
979 978 callback = lambda x: self.listener.removeService(x, self.type, record.alias)
980 979 self.list.append(callback)
981 980 return
982 981 except:
983 982 if not expired:
984 983 self.services[record.alias.lower()] = record
985 984 callback = lambda x: self.listener.addService(x, self.type, record.alias)
986 985 self.list.append(callback)
987 986
988 987 expires = record.getExpirationTime(75)
989 988 if expires < self.nextTime:
990 989 self.nextTime = expires
991 990
992 991 def cancel(self):
993 992 self.done = 1
994 993 self.zeroconf.notifyAll()
995 994
996 995 def run(self):
997 996 while 1:
998 997 event = None
999 998 now = currentTimeMillis()
1000 999 if len(self.list) == 0 and self.nextTime > now:
1001 1000 self.zeroconf.wait(self.nextTime - now)
1002 1001 if globals()['_GLOBAL_DONE'] or self.done:
1003 1002 return
1004 1003 now = currentTimeMillis()
1005 1004
1006 1005 if self.nextTime <= now:
1007 1006 out = DNSOutgoing(_FLAGS_QR_QUERY)
1008 1007 out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
1009 1008 for record in self.services.values():
1010 1009 if not record.isExpired(now):
1011 1010 out.addAnswerAtTime(record, now)
1012 1011 self.zeroconf.send(out)
1013 1012 self.nextTime = now + self.delay
1014 1013 self.delay = min(20 * 1000, self.delay * 2)
1015 1014
1016 1015 if len(self.list) > 0:
1017 1016 event = self.list.pop(0)
1018 1017
1019 1018 if event is not None:
1020 1019 event(self.zeroconf)
1021 1020
1022 1021
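ServiceBrowser.run() above re-queries with exponential backoff: the delay
doubles after each query and is capped at 20 seconds. A small sketch of the
resulting schedule (a 500 ms starting delay is assumed here; the real value
is the module's _BROWSER_TIME constant, defined elsewhere in the file):

def query_times(start_delay=500, queries=8, cap=20 * 1000):
    # Return the times (in ms) at which the first `queries` queries go out.
    t, delay, out = 0, start_delay, []
    for i in range(queries):
        out.append(t)
        t += delay
        delay = min(cap, delay * 2)
    return out

print(query_times())  # [0, 500, 1500, 3500, 7500, 15500, 31500, 51500]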
1023 1022 class ServiceInfo(object):
1024 1023 """Service information"""
1025 1024
1026 1025 def __init__(self, type, name, address=None, port=None, weight=0, priority=0, properties=None, server=None):
1027 1026 """Create a service description.
1028 1027
1029 1028 type: fully qualified service type name
1030 1029 name: fully qualified service name
1031 1030 address: IP address as unsigned short, network byte order
1032 1031 port: port that the service runs on
1033 1032 weight: weight of the service
1034 1033 priority: priority of the service
1035 1034 properties: dictionary of properties (or a string holding the bytes for the text field)
1036 1035 server: fully qualified name for service host (defaults to name)"""
1037 1036
1038 1037 if not name.endswith(type):
1039 1038 raise BadTypeInNameException
1040 1039 self.type = type
1041 1040 self.name = name
1042 1041 self.address = address
1043 1042 self.port = port
1044 1043 self.weight = weight
1045 1044 self.priority = priority
1046 1045 if server:
1047 1046 self.server = server
1048 1047 else:
1049 1048 self.server = name
1050 1049 self.setProperties(properties)
1051 1050
1052 1051 def setProperties(self, properties):
1053 1052 """Sets properties and text of this info from a dictionary"""
1054 1053 if isinstance(properties, dict):
1055 1054 self.properties = properties
1056 1055 list = []
1057 1056 result = ''
1058 1057 for key in properties:
1059 1058 value = properties[key]
1060 1059 if value is None:
1061 1060 suffix = ''.encode('utf-8')
1062 1061 elif isinstance(value, str):
1063 1062 suffix = value.encode('utf-8')
1064 1063 elif isinstance(value, int):
1065 1064 if value:
1066 1065 suffix = 'true'
1067 1066 else:
1068 1067 suffix = 'false'
1069 1068 else:
1070 1069 suffix = ''.encode('utf-8')
1071 1070 list.append('='.join((key, suffix)))
1072 1071 for item in list:
1073 1072 result = ''.join((result, struct.pack('!c', chr(len(item))), item))
1074 1073 self.text = result
1075 1074 else:
1076 1075 self.text = properties
1077 1076
1078 1077 def setText(self, text):
1079 1078 """Sets properties and text given a text field"""
1080 1079 self.text = text
1081 1080 try:
1082 1081 result = {}
1083 1082 end = len(text)
1084 1083 index = 0
1085 1084 strs = []
1086 1085 while index < end:
1087 1086 length = ord(text[index])
1088 1087 index += 1
1089 1088 strs.append(text[index:index+length])
1090 1089 index += length
1091 1090
1092 1091 for s in strs:
1093 1092 eindex = s.find('=')
1094 1093 if eindex == -1:
1095 1094 # No equals sign at all
1096 1095 key = s
1097 1096 value = 0
1098 1097 else:
1099 1098 key = s[:eindex]
1100 1099 value = s[eindex+1:]
1101 1100 if value == 'true':
1102 1101 value = 1
1103 1102 elif value == 'false' or not value:
1104 1103 value = 0
1105 1104
1106 1105 # Only update non-existent properties
1107 1106 if key and result.get(key) == None:
1108 1107 result[key] = value
1109 1108
1110 1109 self.properties = result
1111 1110 except:
1112 1111 traceback.print_exc()
1113 1112 self.properties = None
1114 1113
1115 1114 def getType(self):
1116 1115 """Type accessor"""
1117 1116 return self.type
1118 1117
1119 1118 def getName(self):
1120 1119 """Name accessor"""
1121 1120 if self.type is not None and self.name.endswith("." + self.type):
1122 1121 return self.name[:len(self.name) - len(self.type) - 1]
1123 1122 return self.name
1124 1123
1125 1124 def getAddress(self):
1126 1125 """Address accessor"""
1127 1126 return self.address
1128 1127
1129 1128 def getPort(self):
1130 1129 """Port accessor"""
1131 1130 return self.port
1132 1131
1133 1132 def getPriority(self):
1134 1133 """Pirority accessor"""
1135 1134 return self.priority
1136 1135
1137 1136 def getWeight(self):
1138 1137 """Weight accessor"""
1139 1138 return self.weight
1140 1139
1141 1140 def getProperties(self):
1142 1141 """Properties accessor"""
1143 1142 return self.properties
1144 1143
1145 1144 def getText(self):
1146 1145 """Text accessor"""
1147 1146 return self.text
1148 1147
1149 1148 def getServer(self):
1150 1149 """Server accessor"""
1151 1150 return self.server
1152 1151
1153 1152 def updateRecord(self, zeroconf, now, record):
1154 1153 """Updates service information from a DNS record"""
1155 1154 if record is not None and not record.isExpired(now):
1156 1155 if record.type == _TYPE_A:
1157 1156 #if record.name == self.name:
1158 1157 if record.name == self.server:
1159 1158 self.address = record.address
1160 1159 elif record.type == _TYPE_SRV:
1161 1160 if record.name == self.name:
1162 1161 self.server = record.server
1163 1162 self.port = record.port
1164 1163 self.weight = record.weight
1165 1164 self.priority = record.priority
1166 1165 #self.address = None
1167 1166 self.updateRecord(zeroconf, now, zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN))
1168 1167 elif record.type == _TYPE_TXT:
1169 1168 if record.name == self.name:
1170 1169 self.setText(record.text)
1171 1170
1172 1171 def request(self, zeroconf, timeout):
1173 1172 """Returns true if the service could be discovered on the
1174 1173 network, and updates this object with details discovered.
1175 1174 """
1176 1175 now = currentTimeMillis()
1177 1176 delay = _LISTENER_TIME
1178 1177 next = now + delay
1179 1178 last = now + timeout
1180 1179 result = 0
1181 1180 try:
1182 1181 zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN))
1183 1182 while self.server is None or self.address is None or self.text is None:
1184 1183 if last <= now:
1185 1184 return 0
1186 1185 if next <= now:
1187 1186 out = DNSOutgoing(_FLAGS_QR_QUERY)
1188 1187 out.addQuestion(DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN))
1189 1188 out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_SRV, _CLASS_IN), now)
1190 1189 out.addQuestion(DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN))
1191 1190 out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_TXT, _CLASS_IN), now)
1192 1191 if self.server is not None:
1193 1192 out.addQuestion(DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
1194 1193 out.addAnswerAtTime(zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN), now)
1195 1194 zeroconf.send(out)
1196 1195 next = now + delay
1197 1196 delay = delay * 2
1198 1197
1199 1198 zeroconf.wait(min(next, last) - now)
1200 1199 now = currentTimeMillis()
1201 1200 result = 1
1202 1201 finally:
1203 1202 zeroconf.removeListener(self)
1204 1203
1205 1204 return result
1206 1205
1207 1206 def __eq__(self, other):
1208 1207 """Tests equality of service name"""
1209 1208 if isinstance(other, ServiceInfo):
1210 1209 return other.name == self.name
1211 1210 return 0
1212 1211
1213 1212 def __ne__(self, other):
1214 1213 """Non-equality test"""
1215 1214 return not self.__eq__(other)
1216 1215
1217 1216 def __repr__(self):
1218 1217 """String representation"""
1219 1218 result = "service[%s,%s:%s," % (self.name, socket.inet_ntoa(self.getAddress()), self.port)
1220 1219 if self.text is None:
1221 1220 result += "None"
1222 1221 else:
1223 1222 if len(self.text) < 20:
1224 1223 result += self.text
1225 1224 else:
1226 1225 result += self.text[:17] + "..."
1227 1226 result += "]"
1228 1227 return result
1229 1228
1230 1229
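setProperties() and setText() above implement the DNS TXT record wire
format: each property is a single length-prefixed "key=value" string, so
every item is limited to 255 bytes by its one-byte length prefix. A
standalone round-trip sketch of that format (simplified; it skips the
boolean handling the methods above perform):

def encode_txt(props):
    out = ''
    for key, value in props.items():
        item = '%s=%s' % (key, value)
        out += chr(len(item)) + item
    return out

def decode_txt(text):
    props, index = {}, 0
    while index < len(text):
        length = ord(text[index])
        index += 1
        key, sep, value = text[index:index + length].partition('=')
        props[key] = value
        index += length
    return props

print(decode_txt(encode_txt({'path': '/test', 'description': 'demo'})))
# {'path': '/test', 'description': 'demo'}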
1231 1230 class Zeroconf(object):
1232 1231 """Implementation of Zeroconf Multicast DNS Service Discovery
1233 1232
1234 1233 Supports registration, unregistration, queries and browsing.
1235 1234 """
1236 1235 def __init__(self, bindaddress=None):
1237 1236 """Creates an instance of the Zeroconf class, establishing
1238 1237 multicast communications, listening and reaping threads."""
1239 1238 globals()['_GLOBAL_DONE'] = 0
1240 1239 if bindaddress is None:
1241 1240 self.intf = socket.gethostbyname(socket.gethostname())
1242 1241 else:
1243 1242 self.intf = bindaddress
1244 1243 self.group = ('', _MDNS_PORT)
1245 1244 self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
1246 1245 try:
1247 1246 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
1248 1247 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
1249 1248 except:
1250 1249 # SO_REUSEADDR should be equivalent to SO_REUSEPORT for
1251 1250 # multicast UDP sockets (p 731, "TCP/IP Illustrated,
1252 1251 # Volume 2"), but some BSD-derived systems require
1253 1252 # SO_REUSEPORT to be specified explicitly. Also, not all
1254 1253 # versions of Python have SO_REUSEPORT available. So
1255 1254 # if you're on a BSD-based system, and haven't upgraded
1256 1255 # to Python 2.3 yet, you may find this library doesn't
1257 1256 # work as expected.
1258 1257 #
1259 1258 pass
1260 1259 self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
1261 1260 self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
1262 1261 try:
1263 1262 self.socket.bind(self.group)
1264 1263 except:
1265 1264 # Some versions of Linux raise an exception even though
1266 1265 # the SO_REUSE* options have been set, so ignore it
1267 1266 #
1268 1267 pass
1269 1268 #self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.intf) + socket.inet_aton('0.0.0.0'))
1270 1269 self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
1271 1270
1272 1271 self.listeners = []
1273 1272 self.browsers = []
1274 1273 self.services = {}
1275 1274 self.servicetypes = {}
1276 1275
1277 1276 self.cache = DNSCache()
1278 1277
1279 1278 self.condition = threading.Condition()
1280 1279
1281 1280 self.engine = Engine(self)
1282 1281 self.listener = Listener(self)
1283 1282 self.reaper = Reaper(self)
1284 1283
1285 1284 def isLoopback(self):
1286 1285 return self.intf.startswith("127.0.0.1")
1287 1286
1288 1287 def isLinklocal(self):
1289 1288 return self.intf.startswith("169.254.")
1290 1289
1291 1290 def wait(self, timeout):
1292 1291 """Calling thread waits for a given number of milliseconds or
1293 1292 until notified."""
1294 1293 self.condition.acquire()
1295 1294 self.condition.wait(timeout/1000)
1296 1295 self.condition.release()
1297 1296
1298 1297 def notifyAll(self):
1299 1298 """Notifies all waiting threads"""
1300 1299 self.condition.acquire()
1301 1300 self.condition.notifyAll()
1302 1301 self.condition.release()
1303 1302
1304 1303 def getServiceInfo(self, type, name, timeout=3000):
1305 1304 """Returns network's service information for a particular
1306 1305 name and type, or None if no service matches by the timeout,
1307 1306 which defaults to 3 seconds."""
1308 1307 info = ServiceInfo(type, name)
1309 1308 if info.request(self, timeout):
1310 1309 return info
1311 1310 return None
1312 1311
1313 1312 def addServiceListener(self, type, listener):
1314 1313 """Adds a listener for a particular service type. This object
1315 1314 will then have its updateRecord method called when information
1316 1315 arrives for that type."""
1317 1316 self.removeServiceListener(listener)
1318 1317 self.browsers.append(ServiceBrowser(self, type, listener))
1319 1318
1320 1319 def removeServiceListener(self, listener):
1321 1320 """Removes a listener from the set that is currently listening."""
1322 1321 for browser in self.browsers:
1323 1322 if browser.listener == listener:
1324 1323 browser.cancel()
1325 1324 del(browser)
1326 1325
1327 1326 def registerService(self, info, ttl=_DNS_TTL):
1328 1327 """Registers service information to the network with a default TTL
1329 1328 of 60 seconds. Zeroconf will then respond to requests for
1330 1329 information for that service. The name of the service may be
1331 1330 changed if needed to make it unique on the network."""
1332 1331 self.checkService(info)
1333 1332 self.services[info.name.lower()] = info
1334 1333 if self.servicetypes.has_key(info.type):
1335 1334 self.servicetypes[info.type]+=1
1336 1335 else:
1337 1336 self.servicetypes[info.type]=1
1338 1337 now = currentTimeMillis()
1339 1338 nextTime = now
1340 1339 i = 0
1341 1340 while i < 3:
1342 1341 if now < nextTime:
1343 1342 self.wait(nextTime - now)
1344 1343 now = currentTimeMillis()
1345 1344 continue
1346 1345 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1347 1346 out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0)
1348 1347 out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, ttl, info.priority, info.weight, info.port, info.server), 0)
1349 1348 out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0)
1350 1349 if info.address:
1351 1350 out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, ttl, info.address), 0)
1352 1351 self.send(out)
1353 1352 i += 1
1354 1353 nextTime += _REGISTER_TIME
1355 1354
1356 1355 def unregisterService(self, info):
1357 1356 """Unregister a service."""
1358 1357 try:
1359 1358 del(self.services[info.name.lower()])
1360 1359 if self.servicetypes[info.type]>1:
1361 1360 self.servicetypes[info.type]-=1
1362 1361 else:
1363 1362 del self.servicetypes[info.type]
1364 1363 except:
1365 1364 pass
1366 1365 now = currentTimeMillis()
1367 1366 nextTime = now
1368 1367 i = 0
1369 1368 while i < 3:
1370 1369 if now < nextTime:
1371 1370 self.wait(nextTime - now)
1372 1371 now = currentTimeMillis()
1373 1372 continue
1374 1373 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1375 1374 out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
1376 1375 out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.name), 0)
1377 1376 out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
1378 1377 if info.address:
1379 1378 out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
1380 1379 self.send(out)
1381 1380 i += 1
1382 1381 nextTime += _UNREGISTER_TIME
1383 1382
1384 1383 def unregisterAllServices(self):
1385 1384 """Unregister all registered services."""
1386 1385 if len(self.services) > 0:
1387 1386 now = currentTimeMillis()
1388 1387 nextTime = now
1389 1388 i = 0
1390 1389 while i < 3:
1391 1390 if now < nextTime:
1392 1391 self.wait(nextTime - now)
1393 1392 now = currentTimeMillis()
1394 1393 continue
1395 1394 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1396 1395 for info in self.services.values():
1397 1396 out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
1398 1397 out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.server), 0)
1399 1398 out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
1400 1399 if info.address:
1401 1400 out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
1402 1401 self.send(out)
1403 1402 i += 1
1404 1403 nextTime += _UNREGISTER_TIME
1405 1404
1406 1405 def checkService(self, info):
1407 1406 """Checks the network for a unique service name, modifying the
1408 1407 ServiceInfo passed in if it is not unique."""
1409 1408 now = currentTimeMillis()
1410 1409 nextTime = now
1411 1410 i = 0
1412 1411 while i < 3:
1413 1412 for record in self.cache.entriesWithName(info.type):
1414 1413 if record.type == _TYPE_PTR and not record.isExpired(now) and record.alias == info.name:
1415 1414 if (info.name.find('.') < 0):
1416 1415 info.name = info.name + ".[" + info.address + ":" + info.port + "]." + info.type
1417 1416 self.checkService(info)
1418 1417 return
1419 1418 raise NonUniqueNameException
1420 1419 if now < nextTime:
1421 1420 self.wait(nextTime - now)
1422 1421 now = currentTimeMillis()
1423 1422 continue
1424 1423 out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
1425 1424 self.debug = out
1426 1425 out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
1427 1426 out.addAuthorativeAnswer(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, info.name))
1428 1427 self.send(out)
1429 1428 i += 1
1430 1429 nextTime += _CHECK_TIME
1431 1430
1432 1431 def addListener(self, listener, question):
1433 1432 """Adds a listener for a given question. The listener will have
1434 1433 its updateRecord method called when information is available to
1435 1434 answer the question."""
1436 1435 now = currentTimeMillis()
1437 1436 self.listeners.append(listener)
1438 1437 if question is not None:
1439 1438 for record in self.cache.entriesWithName(question.name):
1440 1439 if question.answeredBy(record) and not record.isExpired(now):
1441 1440 listener.updateRecord(self, now, record)
1442 1441 self.notifyAll()
1443 1442
1444 1443 def removeListener(self, listener):
1445 1444 """Removes a listener."""
1446 1445 try:
1447 1446 self.listeners.remove(listener)
1448 1447 self.notifyAll()
1449 1448 except:
1450 1449 pass
1451 1450
1452 1451 def updateRecord(self, now, rec):
1453 1452 """Used to notify listeners of new information that has updated
1454 1453 a record."""
1455 1454 for listener in self.listeners:
1456 1455 listener.updateRecord(self, now, rec)
1457 1456 self.notifyAll()
1458 1457
1459 1458 def handleResponse(self, msg):
1460 1459 """Deal with incoming response packets. All answers
1461 1460 are held in the cache, and listeners are notified."""
1462 1461 now = currentTimeMillis()
1463 1462 for record in msg.answers:
1464 1463 expired = record.isExpired(now)
1465 1464 if record in self.cache.entries():
1466 1465 if expired:
1467 1466 self.cache.remove(record)
1468 1467 else:
1469 1468 entry = self.cache.get(record)
1470 1469 if entry is not None:
1471 1470 entry.resetTTL(record)
1472 1471 record = entry
1473 1472 else:
1474 1473 self.cache.add(record)
1475 1474
1476 1475 self.updateRecord(now, record)
1477 1476
1478 1477 def handleQuery(self, msg, addr, port):
1479 1478 """Deal with incoming query packets. Provides a response if
1480 1479 possible."""
1481 1480 out = None
1482 1481
1483 1482 # Support unicast client responses
1484 1483 #
1485 1484 if port != _MDNS_PORT:
1486 1485 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
1487 1486 for question in msg.questions:
1488 1487 out.addQuestion(question)
1489
1488
1490 1489 for question in msg.questions:
1491 1490 if question.type == _TYPE_PTR:
1492 1491 if question.name == "_services._dns-sd._udp.local.":
1493 1492 for stype in self.servicetypes.keys():
1494 1493 if out is None:
1495 1494 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1496 out.addAnswer(msg, DNSPointer("_services._dns-sd._udp.local.", _TYPE_PTR, _CLASS_IN, _DNS_TTL, stype))
1495 out.addAnswer(msg, DNSPointer("_services._dns-sd._udp.local.", _TYPE_PTR, _CLASS_IN, _DNS_TTL, stype))
1497 1496 for service in self.services.values():
1498 1497 if question.name == service.type:
1499 1498 if out is None:
1500 1499 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1501 1500 out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, service.name))
1502 1501 else:
1503 1502 try:
1504 1503 if out is None:
1505 1504 out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
1506
1505
1507 1506 # Answer A record queries for any service addresses we know
1508 1507 if question.type == _TYPE_A or question.type == _TYPE_ANY:
1509 1508 for service in self.services.values():
1510 1509 if service.server == question.name.lower():
1511 1510 out.addAnswer(msg, DNSAddress(question.name, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
1512
1511
1513 1512 service = self.services.get(question.name.lower(), None)
1514 1513 if not service: continue
1515
1514
1516 1515 if question.type == _TYPE_SRV or question.type == _TYPE_ANY:
1517 1516 out.addAnswer(msg, DNSService(question.name, _TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.priority, service.weight, service.port, service.server))
1518 1517 if question.type == _TYPE_TXT or question.type == _TYPE_ANY:
1519 1518 out.addAnswer(msg, DNSText(question.name, _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text))
1520 1519 if question.type == _TYPE_SRV:
1521 1520 out.addAdditionalAnswer(DNSAddress(service.server, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
1522 1521 except:
1523 1522 traceback.print_exc()
1524
1523
1525 1524 if out is not None and out.answers:
1526 1525 out.id = msg.id
1527 1526 self.send(out, addr, port)
1528 1527
1529 1528 def send(self, out, addr = _MDNS_ADDR, port = _MDNS_PORT):
1530 1529 """Sends an outgoing packet."""
1531 1530 # This is a quick test to see if we can parse the packets we generate
1532 1531 #temp = DNSIncoming(out.packet())
1533 1532 try:
1534 bytes_sent = self.socket.sendto(out.packet(), 0, (addr, port))
1533 self.socket.sendto(out.packet(), 0, (addr, port))
1535 1534 except:
1536 1535 # Ignore this, it may be a temporary loss of network connection
1537 1536 pass
1538 1537
1539 1538 def close(self):
1540 1539 """Ends the background threads, and prevent this instance from
1541 1540 servicing further queries."""
1542 1541 if globals()['_GLOBAL_DONE'] == 0:
1543 1542 globals()['_GLOBAL_DONE'] = 1
1544 1543 self.notifyAll()
1545 1544 self.engine.notify()
1546 1545 self.unregisterAllServices()
1547 1546 self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
1548 1547 self.socket.close()
1549 1548
1550 1549 # Test a few module features, including service registration, service
1551 1550 # query (for Zoe), and service unregistration.
1552 1551
1553 1552 if __name__ == '__main__':
1554 1553 print "Multicast DNS Service Discovery for Python, version", __version__
1555 1554 r = Zeroconf()
1556 1555 print "1. Testing registration of a service..."
1557 1556 desc = {'version':'0.10','a':'test value', 'b':'another value'}
1558 1557 info = ServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.", socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
1559 1558 print " Registering service..."
1560 1559 r.registerService(info)
1561 1560 print " Registration done."
1562 1561 print "2. Testing query of service information..."
1563 1562 print " Getting ZOE service:", str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local."))
1564 1563 print " Query done."
1565 1564 print "3. Testing query of own service..."
1566 1565 print " Getting self:", str(r.getServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local."))
1567 1566 print " Query done."
1568 1567 print "4. Testing unregister of service information..."
1569 1568 r.unregisterService(info)
1570 1569 print " Unregister done."
1571 1570 r.close()
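The test above exercises registration and lookup but not browsing; a hedged
usage sketch of the browsing side, using the classes defined in this module
(PrintingListener is hypothetical):

class PrintingListener(object):
    def addService(self, zeroconf, type, name):
        print "Service added: %s" % name
    def removeService(self, zeroconf, type, name):
        print "Service removed: %s" % name

# zc = Zeroconf()
# ServiceBrowser(zc, "_http._tcp.local.", PrintingListener())
# ... callbacks fire as services appear and disappear; finally:
# zc.close()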
@@ -1,160 +1,160 b''
1 1 # zeroconf.py - zeroconf support for Mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of
6 6 # the GNU General Public License (version 2), incorporated herein by
7 7 # reference.
8 8
9 9 '''zeroconf support for mercurial repositories
10 10
11 11 Zeroconf-enabled repositories will be announced on the network without the need
12 12 to configure a server or a service. They can be discovered without knowing
13 13 their actual IP address.
14 14
15 15 To use the zeroconf extension add the following entry to your hgrc file:
16 16
17 17 [extensions]
18 18 hgext.zeroconf =
19 19
20 20 To allow other people to discover your repository, run "hg serve" in your
21 21 repository.
22 22
23 23 $ cd test
24 24 $ hg serve
25 25
26 26 You can discover zeroconf enabled repositories by running "hg paths".
27 27
28 28 $ hg paths
29 29 zc-test = http://example.com:8000/test
30 30 '''
31 31
32 32 import Zeroconf, socket, time, os
33 33 from mercurial import ui
34 34 from mercurial import extensions
35 35 from mercurial.hgweb import hgweb_mod
36 36 from mercurial.hgweb import hgwebdir_mod
37 37
38 38 # publish
39 39
40 40 server = None
41 41 localip = None
42 42
43 43 def getip():
44 44 # finds external-facing interface without sending any packets (Linux)
45 45 try:
46 46 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
47 47 s.connect(('1.0.0.1', 0))
48 48 ip = s.getsockname()[0]
49 49 return ip
50 50 except:
51 51 pass
52 52
53 53 # Generic method, sometimes gives useless results
54 54 dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
55 55 if not dumbip.startswith('127.') and ':' not in dumbip:
56 56 return dumbip
57 57
58 58 # works elsewhere, but actually sends a packet
59 59 try:
60 60 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
61 61 s.connect(('1.0.0.1', 1))
62 62 ip = s.getsockname()[0]
63 63 return ip
64 64 except:
65 65 pass
66 66
67 67 return dumbip
68 68
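The first branch of getip() sends nothing because connecting a UDP socket
merely selects a route and binds a local address; no packet leaves until
data is written. A standalone illustration (behavior varies by platform,
hence the defensive except, as in getip() itself):

import socket
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(('1.0.0.1', 0))  # port 0: nothing could actually be sent
    print s.getsockname()[0]   # local address of the routed interface
except socket.error:
    pass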
69 69 def publish(name, desc, path, port):
70 70 global server, localip
71 71 if not server:
72 72 try:
73 73 server = Zeroconf.Zeroconf()
74 74 except socket.gaierror:
75 75 # if we have no internet connection, this can happen.
76 76 return
77 77 ip = getip()
78 78 localip = socket.inet_aton(ip)
79 79
80 80 hostname = socket.gethostname().split('.')[0]
81 81 host = hostname + ".local"
82 82 name = "%s-%s" % (hostname, name)
83 83
84 84 # advertise to browsers
85 85 svc = Zeroconf.ServiceInfo('_http._tcp.local.',
86 86 name + '._http._tcp.local.',
87 87 server = host,
88 88 port = port,
89 89 properties = {'description': desc,
90 90 'path': "/" + path},
91 91 address = localip, weight = 0, priority = 0)
92 92 server.registerService(svc)
93 93
94 94 # advertise to Mercurial clients
95 95 svc = Zeroconf.ServiceInfo('_hg._tcp.local.',
96 96 name + '._hg._tcp.local.',
97 97 server = host,
98 98 port = port,
99 99 properties = {'description': desc,
100 100 'path': "/" + path},
101 101 address = localip, weight = 0, priority = 0)
102 102 server.registerService(svc)
103 103
104 104 class hgwebzc(hgweb_mod.hgweb):
105 105 def __init__(self, repo, name=None):
106 106 super(hgwebzc, self).__init__(repo, name)
107 107 name = self.reponame or os.path.basename(repo.root)
108 108 desc = self.repo.ui.config("web", "description", name)
109 109 publish(name, desc, name, int(repo.ui.config("web", "port", 8000)))
110 110
111 111 class hgwebdirzc(hgwebdir_mod.hgwebdir):
112 112 def run(self):
113 113 for r, p in self.repos:
114 114 u = ui.ui(parentui=self.parentui)
115 115 u.readconfig(os.path.join(p, '.hg', 'hgrc'))
116 116 n = os.path.basename(r)
117 117 publish(n, "hgweb", p, int(u.config("web", "port", 8000)))
118 118 return super(hgwebdirzc, self).run()
119 119
120 120 # listen
121 121
122 122 class listener(object):
123 123 def __init__(self):
124 124 self.found = {}
125 125 def removeService(self, server, type, name):
126 126 if repr(name) in self.found:
127 127 del self.found[repr(name)]
128 128 def addService(self, server, type, name):
129 129 self.found[repr(name)] = server.getServiceInfo(type, name)
130 130
131 131 def getzcpaths():
132 132 server = Zeroconf.Zeroconf()
133 133 l = listener()
134 browser = Zeroconf.ServiceBrowser(server, "_hg._tcp.local.", l)
134 Zeroconf.ServiceBrowser(server, "_hg._tcp.local.", l)
135 135 time.sleep(1)
136 136 server.close()
137 137 for v in l.found.values():
138 138 n = v.name[:v.name.index('.')]
139 139 n.replace(" ", "-")
140 140 u = "http://%s:%s%s" % (socket.inet_ntoa(v.address), v.port,
141 141 v.properties.get("path", "/"))
142 142 yield "zc-" + n, u
143 143
144 144 def config(orig, self, section, key, default=None, untrusted=False):
145 145 if section == "paths" and key.startswith("zc-"):
146 146 for n, p in getzcpaths():
147 147 if n == key:
148 148 return p
149 149 return orig(self, section, key, default, untrusted)
150 150
151 151 def configitems(orig, self, section, untrusted=False):
152 152 r = orig(self, section, untrusted)
153 153 if section == "paths":
154 154 r += getzcpaths()
155 155 return r
156 156
157 157 extensions.wrapfunction(ui.ui, 'config', config)
158 158 extensions.wrapfunction(ui.ui, 'configitems', configitems)
159 159 hgweb_mod.hgweb = hgwebzc
160 160 hgwebdir_mod.hgwebdir = hgwebdirzc
@@ -1,3422 +1,3422 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _, gettext
10 10 import os, re, sys
11 11 import hg, util, revlog, bundlerepo, extensions, copies, context, error
12 12 import difflib, patch, time, help, mdiff, tempfile, url
13 13 import archival, changegroup, cmdutil, hgweb.server, sshserver, hbisect
14 14 import merge as merge_
15 15
16 16 # Commands start here, listed alphabetically
17 17
18 18 def add(ui, repo, *pats, **opts):
19 19 """add the specified files on the next commit
20 20
21 21 Schedule files to be version controlled and added to the repository.
22 22
23 23 The files will be added to the repository at the next commit. To
24 24 undo an add before that, see hg revert.
25 25
26 26 If no names are given, add all files to the repository.
27 27 """
28 28
29 29 rejected = None
30 30 exacts = {}
31 31 names = []
32 32 m = cmdutil.match(repo, pats, opts)
33 33 m.bad = lambda x,y: True
34 34 for abs in repo.walk(m):
35 35 if m.exact(abs):
36 36 if ui.verbose:
37 37 ui.status(_('adding %s\n') % m.rel(abs))
38 38 names.append(abs)
39 39 exacts[abs] = 1
40 40 elif abs not in repo.dirstate:
41 41 ui.status(_('adding %s\n') % m.rel(abs))
42 42 names.append(abs)
43 43 if not opts.get('dry_run'):
44 44 rejected = repo.add(names)
45 45 rejected = [p for p in rejected if p in exacts]
46 46 return rejected and 1 or 0
47 47
48 48 def addremove(ui, repo, *pats, **opts):
49 49 """add all new files, delete all missing files
50 50
51 51 Add all new files and remove all missing files from the repository.
52 52
53 53 New files are ignored if they match any of the patterns in .hgignore. As
54 54 with add, these changes take effect at the next commit.
55 55
56 56 Use the -s option to detect renamed files. With a parameter > 0,
57 57 this compares every removed file with every added file and records
58 58 those similar enough as renames. This option takes a percentage
59 59 between 0 (disabled) and 100 (files must be identical) as its
60 60 parameter. Detecting renamed files this way can be expensive.
61 61 """
62 62 try:
63 63 sim = float(opts.get('similarity') or 0)
64 64 except ValueError:
65 65 raise util.Abort(_('similarity must be a number'))
66 66 if sim < 0 or sim > 100:
67 67 raise util.Abort(_('similarity must be between 0 and 100'))
68 68 return cmdutil.addremove(repo, pats, opts, similarity=sim/100.)
69 69
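The similarity matching described above is, conceptually, a pairwise
content comparison between removed and added files. A rough standalone
sketch using difflib (Mercurial's real implementation in cmdutil differs;
find_renames is hypothetical):

import difflib

def find_renames(removed, added, threshold):
    # removed/added map filename -> contents; threshold is a percentage.
    for old, olddata in removed.items():
        for new, newdata in added.items():
            sim = difflib.SequenceMatcher(None, olddata, newdata).ratio()
            if sim * 100 >= threshold:
                yield old, new, sim

removed = {'a.txt': 'one\ntwo\nthree\n'}
added = {'b.txt': 'one\ntwo\nthree\nfour\n'}
for old, new, sim in find_renames(removed, added, 75):
    print "%s -> %s (%.0f%% similar)" % (old, new, sim * 100)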
70 70 def annotate(ui, repo, *pats, **opts):
71 71 """show changeset information per file line
72 72
73 73 List changes in files, showing the revision id responsible for each line.
74 74
75 75 This command is useful to discover who did a change or when a change took
76 76 place.
77 77
78 78 Without the -a option, annotate will avoid processing files it
79 79 detects as binary. With -a, annotate will generate an annotation
80 80 anyway, probably with undesirable results.
81 81 """
82 82 datefunc = ui.quiet and util.shortdate or util.datestr
83 83 getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
84 84
85 85 if not pats:
86 86 raise util.Abort(_('at least one file name or pattern required'))
87 87
88 88 opmap = [('user', lambda x: ui.shortuser(x[0].user())),
89 89 ('number', lambda x: str(x[0].rev())),
90 90 ('changeset', lambda x: short(x[0].node())),
91 91 ('date', getdate),
92 92 ('follow', lambda x: x[0].path()),
93 93 ]
94 94
95 95 if (not opts.get('user') and not opts.get('changeset') and not opts.get('date')
96 96 and not opts.get('follow')):
97 97 opts['number'] = 1
98 98
99 99 linenumber = opts.get('line_number') is not None
100 100 if (linenumber and (not opts.get('changeset')) and (not opts.get('number'))):
101 101 raise util.Abort(_('at least one of -n/-c is required for -l'))
102 102
103 103 funcmap = [func for op, func in opmap if opts.get(op)]
104 104 if linenumber:
105 105 lastfunc = funcmap[-1]
106 106 funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])
107 107
108 108 ctx = repo[opts.get('rev')]
109 109
110 110 m = cmdutil.match(repo, pats, opts)
111 111 for abs in ctx.walk(m):
112 112 fctx = ctx[abs]
113 113 if not opts.get('text') and util.binary(fctx.data()):
114 114 ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
115 115 continue
116 116
117 117 lines = fctx.annotate(follow=opts.get('follow'),
118 118 linenumber=linenumber)
119 119 pieces = []
120 120
121 121 for f in funcmap:
122 122 l = [f(n) for n, dummy in lines]
123 123 if l:
124 124 ml = max(map(len, l))
125 125 pieces.append(["%*s" % (ml, x) for x in l])
126 126
127 127 if pieces:
128 128 for p, l in zip(zip(*pieces), lines):
129 129 ui.write("%s: %s" % (" ".join(p), l[1]))
130 130
131 131 def archive(ui, repo, dest, **opts):
132 132 '''create unversioned archive of a repository revision
133 133
134 134 By default, the revision used is the parent of the working
135 135 directory; use "-r" to specify a different revision.
136 136
137 137 To specify the type of archive to create, use "-t". Valid
138 138 types are:
139 139
140 140 "files" (default): a directory full of files
141 141 "tar": tar archive, uncompressed
142 142 "tbz2": tar archive, compressed using bzip2
143 143 "tgz": tar archive, compressed using gzip
144 144 "uzip": zip archive, uncompressed
145 145 "zip": zip archive, compressed using deflate
146 146
147 147 The exact name of the destination archive or directory is given
148 148 using a format string; see "hg help export" for details.
149 149
150 150 Each member added to an archive file has a directory prefix
151 151 prepended. Use "-p" to specify a format string for the prefix.
152 152 The default is the basename of the archive, with suffixes removed.
153 153 '''
154 154
155 155 ctx = repo[opts.get('rev')]
156 156 if not ctx:
157 157 raise util.Abort(_('no working directory: please specify a revision'))
158 158 node = ctx.node()
159 159 dest = cmdutil.make_filename(repo, dest, node)
160 160 if os.path.realpath(dest) == repo.root:
161 161 raise util.Abort(_('repository root cannot be destination'))
162 162 matchfn = cmdutil.match(repo, [], opts)
163 163 kind = opts.get('type') or 'files'
164 164 prefix = opts.get('prefix')
165 165 if dest == '-':
166 166 if kind == 'files':
167 167 raise util.Abort(_('cannot archive plain files to stdout'))
168 168 dest = sys.stdout
169 169 if not prefix: prefix = os.path.basename(repo.root) + '-%h'
170 170 prefix = cmdutil.make_filename(repo, prefix, node)
171 171 archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
172 172 matchfn, prefix)
173 173
174 174 def backout(ui, repo, node=None, rev=None, **opts):
175 175 '''reverse effect of earlier changeset
176 176
177 177 Commit the backed out changes as a new changeset. The new
178 178 changeset is a child of the backed out changeset.
179 179
180 180 If you back out a changeset other than the tip, a new head is
181 181 created. This head will be the new tip and you should merge this
182 182 backout changeset with another head (current one by default).
183 183
184 184 The --merge option remembers the parent of the working directory
185 185 before starting the backout, then merges the new head with that
186 186 changeset afterwards. This saves you from doing the merge by
187 187 hand. The result of this merge is not committed, as with a normal
188 188 merge.
189 189
190 190 See \'hg help dates\' for a list of formats valid for -d/--date.
191 191 '''
192 192 if rev and node:
193 193 raise util.Abort(_("please specify just one revision"))
194 194
195 195 if not rev:
196 196 rev = node
197 197
198 198 if not rev:
199 199 raise util.Abort(_("please specify a revision to backout"))
200 200
201 201 date = opts.get('date')
202 202 if date:
203 203 opts['date'] = util.parsedate(date)
204 204
205 205 cmdutil.bail_if_changed(repo)
206 206 node = repo.lookup(rev)
207 207
208 208 op1, op2 = repo.dirstate.parents()
209 209 a = repo.changelog.ancestor(op1, node)
210 210 if a != node:
211 211 raise util.Abort(_('cannot back out change on a different branch'))
212 212
213 213 p1, p2 = repo.changelog.parents(node)
214 214 if p1 == nullid:
215 215 raise util.Abort(_('cannot back out a change with no parents'))
216 216 if p2 != nullid:
217 217 if not opts.get('parent'):
218 218 raise util.Abort(_('cannot back out a merge changeset without '
219 219 '--parent'))
220 220 p = repo.lookup(opts['parent'])
221 221 if p not in (p1, p2):
222 222 raise util.Abort(_('%s is not a parent of %s') %
223 223 (short(p), short(node)))
224 224 parent = p
225 225 else:
226 226 if opts.get('parent'):
227 227 raise util.Abort(_('cannot use --parent on non-merge changeset'))
228 228 parent = p1
229 229
230 230 # the backout should appear on the same branch
231 231 branch = repo.dirstate.branch()
232 232 hg.clean(repo, node, show_stats=False)
233 233 repo.dirstate.setbranch(branch)
234 234 revert_opts = opts.copy()
235 235 revert_opts['date'] = None
236 236 revert_opts['all'] = True
237 237 revert_opts['rev'] = hex(parent)
238 238 revert_opts['no_backup'] = None
239 239 revert(ui, repo, **revert_opts)
240 240 commit_opts = opts.copy()
241 241 commit_opts['addremove'] = False
242 242 if not commit_opts['message'] and not commit_opts['logfile']:
243 243 commit_opts['message'] = _("Backed out changeset %s") % (short(node))
244 244 commit_opts['force_editor'] = True
245 245 commit(ui, repo, **commit_opts)
246 246 def nice(node):
247 247 return '%d:%s' % (repo.changelog.rev(node), short(node))
248 248 ui.status(_('changeset %s backs out changeset %s\n') %
249 249 (nice(repo.changelog.tip()), nice(node)))
250 250 if op1 != node:
251 251 hg.clean(repo, op1, show_stats=False)
252 252 if opts.get('merge'):
253 253 ui.status(_('merging with changeset %s\n') % nice(repo.changelog.tip()))
254 254 hg.merge(repo, hex(repo.changelog.tip()))
255 255 else:
256 256 ui.status(_('the backout changeset is a new head - '
257 257 'do not forget to merge\n'))
258 258 ui.status(_('(use "backout --merge" '
259 259 'if you want to auto-merge)\n'))
260 260
261 261 def bisect(ui, repo, rev=None, extra=None, command=None,
262 262 reset=None, good=None, bad=None, skip=None, noupdate=None):
263 263 """subdivision search of changesets
264 264
265 265 This command helps to find changesets which introduce problems.
266 266 To use, mark the earliest changeset you know exhibits the problem
267 267 as bad, then mark the latest changeset which is free from the
268 268 problem as good. Bisect will update your working directory to a
269 269 revision for testing (unless the --noupdate option is specified).
270 270 Once you have performed tests, mark the working directory as bad
271 271 or good and bisect will either update to another candidate changeset
272 272 or announce that it has found the bad revision.
273 273
274 274 As a shortcut, you can also use the revision argument to mark a
275 275 revision as good or bad without checking it out first.
276 276
277 277 If you supply a command it will be used for automatic bisection. Its exit
278 278 status will be used to mark revisions as good or bad: an exit status of 0
279 279 marks the revision as good, 125 skips it, 127 (command not
280 280 found) aborts the bisection, and any other status greater than 0
281 281 marks the revision as bad.
282 282 """
283 283 def print_result(nodes, good):
284 284 displayer = cmdutil.show_changeset(ui, repo, {})
285 285 transition = (good and "good" or "bad")
286 286 if len(nodes) == 1:
287 287 # narrowed it down to a single revision
288 288 ui.write(_("The first %s revision is:\n") % transition)
289 289 displayer.show(repo[nodes[0]])
290 290 else:
291 291 # multiple possible revisions
292 292 ui.write(_("Due to skipped revisions, the first "
293 293 "%s revision could be any of:\n") % transition)
294 294 for n in nodes:
295 295 displayer.show(repo[n])
296 296
297 297 def check_state(state, interactive=True):
298 298 if not state['good'] or not state['bad']:
299 299 if (good or bad or skip or reset) and interactive:
300 300 return
301 301 if not state['good']:
302 302 raise util.Abort(_('cannot bisect (no known good revisions)'))
303 303 else:
304 304 raise util.Abort(_('cannot bisect (no known bad revisions)'))
305 305 return True
306 306
307 307 # backward compatibility
308 308 if rev in "good bad reset init".split():
309 309 ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
310 310 cmd, rev, extra = rev, extra, None
311 311 if cmd == "good":
312 312 good = True
313 313 elif cmd == "bad":
314 314 bad = True
315 315 else:
316 316 reset = True
317 317 elif extra or good + bad + skip + reset + bool(command) > 1:
318 318 raise util.Abort(_('incompatible arguments'))
319 319
320 320 if reset:
321 321 p = repo.join("bisect.state")
322 322 if os.path.exists(p):
323 323 os.unlink(p)
324 324 return
325 325
326 326 state = hbisect.load_state(repo)
327 327
328 328 if command:
329 329 commandpath = util.find_exe(command)
330 330 changesets = 1
331 331 try:
332 332 while changesets:
333 333 # update state
334 334 status = os.spawnl(os.P_WAIT, commandpath, commandpath)
335 335 if status == 125:
336 336 transition = "skip"
337 337 elif status == 0:
338 338 transition = "good"
339 339 # status < 0 means process was killed
340 340 elif status == 127:
341 341 raise util.Abort(_("failed to execute %s") % command)
342 342 elif status < 0:
343 343 raise util.Abort(_("%s killed") % command)
344 344 else:
345 345 transition = "bad"
346 346 node = repo.lookup(rev or '.')
347 347 state[transition].append(node)
348 348 ui.note(_('Changeset %s: %s\n') % (short(node), transition))
349 349 check_state(state, interactive=False)
350 350 # bisect
351 351 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
352 352 # update to next check
353 353 cmdutil.bail_if_changed(repo)
354 354 hg.clean(repo, nodes[0], show_stats=False)
355 355 finally:
356 356 hbisect.save_state(repo, state)
357 357 return print_result(nodes, not status)
358 358
359 359 # update state
360 360 node = repo.lookup(rev or '.')
361 361 if good:
362 362 state['good'].append(node)
363 363 elif bad:
364 364 state['bad'].append(node)
365 365 elif skip:
366 366 state['skip'].append(node)
367 367
368 368 hbisect.save_state(repo, state)
369 369
370 370 if not check_state(state):
371 371 return
372 372
373 373 # actually bisect
374 374 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
375 375 if changesets == 0:
376 376 print_result(nodes, good)
377 377 else:
378 378 assert len(nodes) == 1 # only a single node can be tested next
379 379 node = nodes[0]
380 380 # compute the approximate number of remaining tests
381 381 tests, size = 0, 2
382 382 while size <= changesets:
383 383 tests, size = tests + 1, size * 2
384 384 rev = repo.changelog.rev(node)
385 385 ui.write(_("Testing changeset %s:%s "
386 386 "(%s changesets remaining, ~%s tests)\n")
387 387 % (rev, short(node), changesets, tests))
388 388 if not noupdate:
389 389 cmdutil.bail_if_changed(repo)
390 390 return hg.clean(repo, node)
391 391
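The tests/size loop in bisect() above estimates the remaining tests as
floor(log2(changesets)), i.e. how many times the candidate range can still
be halved. A standalone check of the same loop:

def estimate(changesets):
    tests, size = 0, 2
    while size <= changesets:
        tests, size = tests + 1, size * 2
    return tests

for n in (1, 2, 3, 8, 100, 1000):
    print n, estimate(n)   # -> 0, 1, 1, 3, 6, 9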
392 392 def branch(ui, repo, label=None, **opts):
393 393 """set or show the current branch name
394 394
395 395 With no argument, show the current branch name. With one argument,
396 396 set the working directory branch name (the branch does not exist in
397 397 the repository until the next commit).
398 398
399 399 Unless --force is specified, branch will not let you set a
400 400 branch name that shadows an existing branch.
401 401
402 402 Use --clean to reset the working directory branch to that of the
403 403 parent of the working directory, negating a previous branch change.
404 404
405 405 Use the command 'hg update' to switch to an existing branch.
406 406 """
407 407
408 408 if opts.get('clean'):
409 409 label = repo[None].parents()[0].branch()
410 410 repo.dirstate.setbranch(label)
411 411 ui.status(_('reset working directory to branch %s\n') % label)
412 412 elif label:
413 413 if not opts.get('force') and label in repo.branchtags():
414 414 if label not in [p.branch() for p in repo.parents()]:
415 415 raise util.Abort(_('a branch of the same name already exists'
416 416 ' (use --force to override)'))
417 417 repo.dirstate.setbranch(util.fromlocal(label))
418 418 ui.status(_('marked working directory as branch %s\n') % label)
419 419 else:
420 420 ui.write("%s\n" % util.tolocal(repo.dirstate.branch()))
421 421
422 422 def branches(ui, repo, active=False):
423 423 """list repository named branches
424 424
425 425 List the repository's named branches, indicating which ones are
426 426 inactive. If active is specified, only show active branches.
427 427
428 428 A branch is considered active if it contains repository heads.
429 429
430 430 Use the command 'hg update' to switch to an existing branch.
431 431 """
432 432 hexfunc = ui.debugflag and hex or short
433 433 activebranches = [util.tolocal(repo[n].branch())
434 434 for n in repo.heads(closed=False)]
435 435 branches = util.sort([(tag in activebranches, repo.changelog.rev(node), tag)
436 436 for tag, node in repo.branchtags().items()])
437 437 branches.reverse()
438 438
439 439 for isactive, node, tag in branches:
440 440 if (not active) or isactive:
441 441 if ui.quiet:
442 442 ui.write("%s\n" % tag)
443 443 else:
444 444 hn = repo.lookup(node)
445 445 if isactive:
446 446 notice = ''
447 447 elif hn not in repo.branchheads(tag, closed=False):
448 448 notice = ' (closed)'
449 449 else:
450 450 notice = ' (inactive)'
451 451 rev = str(node).rjust(31 - util.colwidth(tag))
452 452 data = tag, rev, hexfunc(hn), notice
453 453 ui.write("%s %s:%s%s\n" % data)
454 454
455 455 def bundle(ui, repo, fname, dest=None, **opts):
456 456 """create a changegroup file
457 457
458 458 Generate a compressed changegroup file collecting changesets not
459 459 known to be in another repository.
460 460
461 461 If no destination repository is specified the destination is
462 462 assumed to have all the nodes specified by one or more --base
463 463 parameters. To create a bundle containing all changesets, use
464 464 --all (or --base null). To change the compression method applied,
465 465 use the -t option (by default, bundles are compressed using bz2).
466 466
467 467 The bundle file can then be transferred using conventional means and
468 468 applied to another repository with the unbundle or pull command.
469 469 This is useful when direct push and pull are not available or when
470 470 exporting an entire repository is undesirable.
471 471
472 472 Applying bundles preserves all changeset contents including
473 473 permissions, copy/rename information, and revision history.
474 474 """
475 475 revs = opts.get('rev') or None
476 476 if revs:
477 477 revs = [repo.lookup(rev) for rev in revs]
478 478 if opts.get('all'):
479 479 base = ['null']
480 480 else:
481 481 base = opts.get('base')
482 482 if base:
483 483 if dest:
484 484 raise util.Abort(_("--base is incompatible with specifying "
485 485 "a destination"))
486 486 base = [repo.lookup(rev) for rev in base]
487 487 # create the right base
488 488 # XXX: nodesbetween / changegroup* should be "fixed" instead
489 489 o = []
490 490 has = {nullid: None}
491 491 for n in base:
492 492 has.update(repo.changelog.reachable(n))
493 493 if revs:
494 494 visit = list(revs)
495 495 else:
496 496 visit = repo.changelog.heads()
497 497 seen = {}
498 498 while visit:
499 499 n = visit.pop(0)
500 500 parents = [p for p in repo.changelog.parents(n) if p not in has]
501 501 if len(parents) == 0:
502 502 o.insert(0, n)
503 503 else:
504 504 for p in parents:
505 505 if p not in seen:
506 506 seen[p] = 1
507 507 visit.append(p)
508 508 else:
509 509 cmdutil.setremoteconfig(ui, opts)
510 510 dest, revs, checkout = hg.parseurl(
511 511 ui.expandpath(dest or 'default-push', dest or 'default'), revs)
512 512 other = hg.repository(ui, dest)
513 513 o = repo.findoutgoing(other, force=opts.get('force'))
514 514
515 515 if revs:
516 516 cg = repo.changegroupsubset(o, revs, 'bundle')
517 517 else:
518 518 cg = repo.changegroup(o, 'bundle')
519 519
520 520 bundletype = opts.get('type', 'bzip2').lower()
521 521 btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
522 522 bundletype = btypes.get(bundletype)
523 523 if bundletype not in changegroup.bundletypes:
524 524 raise util.Abort(_('unknown bundle type specified with --type'))
525 525
526 526 changegroup.writebundle(cg, fname, bundletype)
527 527
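The --base handling in bundle() above walks backwards from the requested
heads and keeps only the roots of the changesets not reachable from any
base. A small standalone sketch of that walk over a plain parent map
(hypothetical graph, not repository code):

def findroots(parents, heads, has):
    # parents: node -> parent list; has: nodes reachable from the bases.
    roots, seen, visit = [], set(), list(heads)
    while visit:
        n = visit.pop(0)
        missing = [p for p in parents.get(n, []) if p not in has]
        if not missing:
            roots.insert(0, n)
        else:
            for p in missing:
                if p not in seen:
                    seen.add(p)
                    visit.append(p)
    return roots

parents = {'d': ['c'], 'c': ['b'], 'b': ['a'], 'a': []}
print findroots(parents, ['d'], set(['a', 'b']))  # ['c']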
528 528 def cat(ui, repo, file1, *pats, **opts):
529 529 """output the current or given revision of files
530 530
531 531 Print the specified files as they were at the given revision.
532 532 If no revision is given, the parent of the working directory is used,
533 533 or tip if no revision is checked out.
534 534
535 535 Output may be to a file, in which case the name of the file is
536 536 given using a format string. The formatting rules are the same as
537 537 for the export command, with the following additions:
538 538
539 539 %s basename of file being printed
540 540 %d dirname of file being printed, or '.' if in repo root
541 541 %p root-relative path name of file being printed
542 542 """
543 543 ctx = repo[opts.get('rev')]
544 544 err = 1
545 545 m = cmdutil.match(repo, (file1,) + pats, opts)
546 546 for abs in ctx.walk(m):
547 547 fp = cmdutil.make_file(repo, opts.get('output'), ctx.node(), pathname=abs)
548 548 data = ctx[abs].data()
549 549 if opts.get('decode'):
550 550 data = repo.wwritedata(abs, data)
551 551 fp.write(data)
552 552 err = 0
553 553 return err
554 554
555 555 def clone(ui, source, dest=None, **opts):
556 556 """make a copy of an existing repository
557 557
558 558 Create a copy of an existing repository in a new directory.
559 559
560 560 If no destination directory name is specified, it defaults to the
561 561 basename of the source.
562 562
563 563 The location of the source is added to the new repository's
564 564 .hg/hgrc file, as the default to be used for future pulls.
565 565
566 566 For efficiency, hardlinks are used for cloning whenever the source
567 567 and destination are on the same filesystem (note this applies only
568 568 to the repository data, not to the checked out files). Some
569 569 filesystems, such as AFS, implement hardlinking incorrectly, but
570 570 do not report errors. In these cases, use the --pull option to
571 571 avoid hardlinking.
572 572
573 573 In some cases, you can clone repositories and checked out files
574 574 using full hardlinks with
575 575
576 576 $ cp -al REPO REPOCLONE
577 577
578 578 This is the fastest way to clone, but it is not always safe. The
579 579 operation is not atomic (making sure REPO is not modified during
580 580 the operation is up to you) and you have to make sure your editor
581 581 breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
582 582 this is not compatible with certain extensions that place their
583 583 metadata under the .hg directory, such as mq.
584 584
585 585 If you use the -r option to clone up to a specific revision, no
586 586 subsequent revisions will be present in the cloned repository.
587 587 This option implies --pull, even on local repositories.
588 588
589 589 If the -U option is used, the new clone will contain only a repository
590 590 (.hg) and no working copy (the working copy parent is the null revision).
591 591
592 592 See 'hg help urls' for valid source format details.
593 593
594 594 It is possible to specify an ssh:// URL as the destination, but no
595 595 .hg/hgrc and working directory will be created on the remote side.
596 596 Look at the help text for urls for important details about ssh:// URLs.
597 597 """
598 598 cmdutil.setremoteconfig(ui, opts)
599 599 hg.clone(ui, source, dest,
600 600 pull=opts.get('pull'),
601 601 stream=opts.get('uncompressed'),
602 602 rev=opts.get('rev'),
603 603 update=not opts.get('noupdate'))
604 604
605 605 def commit(ui, repo, *pats, **opts):
606 606 """commit the specified files or all outstanding changes
607 607
608 608 Commit changes to the given files into the repository.
609 609
610 610 If a list of files is omitted, all changes reported by "hg status"
611 611 will be committed.
612 612
613 613 If you are committing the result of a merge, do not provide any
614 614 file names or -I/-X filters.
615 615
616 616 If no commit message is specified, the configured editor is started to
617 617 prompt you for a message.
618 618
619 619 See 'hg help dates' for a list of formats valid for -d/--date.
620 620 """
621 621 extra = {}
622 622 if opts.get('close_branch'):
623 623 extra['close'] = 1
624 624 def commitfunc(ui, repo, message, match, opts):
625 625 return repo.commit(match.files(), message, opts.get('user'),
626 626 opts.get('date'), match, force_editor=opts.get('force_editor'),
627 627 extra=extra)
628 628
629 629 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
630 630 if not node:
631 631 return
632 632 cl = repo.changelog
633 633 rev = cl.rev(node)
634 634 parents = cl.parentrevs(rev)
635 635 if rev - 1 in parents:
636 636 # one of the parents was the old tip
637 637 pass
638 638 elif (parents == (nullrev, nullrev) or
639 639 len(cl.heads(cl.node(parents[0]))) > 1 and
640 640 (parents[1] == nullrev or len(cl.heads(cl.node(parents[1]))) > 1)):
641 641 ui.status(_('created new head\n'))
642 642
643 643 if ui.debugflag:
644 644 ui.write(_('committed changeset %d:%s\n') % (rev,hex(node)))
645 645 elif ui.verbose:
646 646 ui.write(_('committed changeset %d:%s\n') % (rev,short(node)))
647 647
648 648 def copy(ui, repo, *pats, **opts):
649 649 """mark files as copied for the next commit
650 650
651 651 Mark dest as having copies of source files. If dest is a
652 652 directory, copies are put in that directory. If dest is a file,
653 653 the source must be a single file.
654 654
655 655 By default, this command copies the contents of files as they
656 656 stand in the working directory. If invoked with --after, the
657 657 operation is recorded, but no copying is performed.
658 658
659 659 This command takes effect with the next commit. To undo a copy
660 660 before that, see hg revert.
661 661 """
662 662 wlock = repo.wlock(False)
663 663 try:
664 664 return cmdutil.copy(ui, repo, pats, opts)
665 665 finally:
666 666 del wlock
667 667
668 668 def debugancestor(ui, repo, *args):
669 669 """find the ancestor revision of two revisions in a given index"""
670 670 if len(args) == 3:
671 671 index, rev1, rev2 = args
672 672 r = revlog.revlog(util.opener(os.getcwd(), audit=False), index)
673 673 lookup = r.lookup
674 674 elif len(args) == 2:
675 675 if not repo:
676 676 raise util.Abort(_("There is no Mercurial repository here "
677 677 "(.hg not found)"))
678 678 rev1, rev2 = args
679 679 r = repo.changelog
680 680 lookup = repo.lookup
681 681 else:
682 682 raise util.Abort(_('either two or three arguments required'))
683 683 a = r.ancestor(lookup(rev1), lookup(rev2))
684 684 ui.write("%d:%s\n" % (r.rev(a), hex(a)))
685 685
686 686 def debugcomplete(ui, cmd='', **opts):
687 687 """returns the completion list associated with the given command"""
688 688
689 689 if opts.get('options'):
690 690 options = []
691 691 otables = [globalopts]
692 692 if cmd:
693 693 aliases, entry = cmdutil.findcmd(cmd, table, False)
694 694 otables.append(entry[1])
695 695 for t in otables:
696 696 for o in t:
697 697 if o[0]:
698 698 options.append('-%s' % o[0])
699 699 options.append('--%s' % o[1])
700 700 ui.write("%s\n" % "\n".join(options))
701 701 return
702 702
703 703 cmdlist = cmdutil.findpossible(cmd, table)
704 704 if ui.verbose:
705 705 cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
706 706 ui.write("%s\n" % "\n".join(util.sort(cmdlist)))
707 707
708 708 def debugfsinfo(ui, path = "."):
709 709 file('.debugfsinfo', 'w').write('')
710 710 ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
711 711 ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
712 712 ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
713 713 and 'yes' or 'no'))
714 714 os.unlink('.debugfsinfo')
715 715
716 716 def debugrebuildstate(ui, repo, rev="tip"):
717 717 """rebuild the dirstate as it would look like for the given revision"""
718 718 ctx = repo[rev]
719 719 wlock = repo.wlock()
720 720 try:
721 721 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
722 722 finally:
723 723 del wlock
724 724
725 725 def debugcheckstate(ui, repo):
726 726 """validate the correctness of the current dirstate"""
727 727 parent1, parent2 = repo.dirstate.parents()
728 728 m1 = repo[parent1].manifest()
729 729 m2 = repo[parent2].manifest()
730 730 errors = 0
731 731 for f in repo.dirstate:
732 732 state = repo.dirstate[f]
733 733 if state in "nr" and f not in m1:
734 734 ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
735 735 errors += 1
736 736 if state in "a" and f in m1:
737 737 ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
738 738 errors += 1
739 739 if state in "m" and f not in m1 and f not in m2:
740 740 ui.warn(_("%s in state %s, but not in either manifest\n") %
741 741 (f, state))
742 742 errors += 1
743 743 for f in m1:
744 744 state = repo.dirstate[f]
745 745 if state not in "nrm":
746 746 ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
747 747 errors += 1
748 748 if errors:
749 749 error = _(".hg/dirstate inconsistent with current parent's manifest")
750 750 raise util.Abort(error)
751 751
752 752 def showconfig(ui, repo, *values, **opts):
753 753 """show combined config settings from all hgrc files
754 754
755 755 With no args, print names and values of all config items.
756 756
757 757 With one arg of the form section.name, print just the value of
758 758 that config item.
759 759
760 760 With multiple args, print names and values of all config items
761 761 with matching section names."""
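    # Hypothetical usage: "hg showconfig ui.username" prints just that
    # item's value, while "hg showconfig paths" prints name=value pairs
    # for every item in the [paths] section.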
762 762
763 763 untrusted = bool(opts.get('untrusted'))
764 764 if values:
765 765 if len([v for v in values if '.' in v]) > 1:
766 766 raise util.Abort(_('only one config item permitted'))
767 767 for section, name, value in ui.walkconfig(untrusted=untrusted):
768 768 sectname = section + '.' + name
769 769 if values:
770 770 for v in values:
771 771 if v == section:
772 772 ui.write('%s=%s\n' % (sectname, value))
773 773 elif v == sectname:
774 774 ui.write(value, '\n')
775 775 else:
776 776 ui.write('%s=%s\n' % (sectname, value))
777 777
778 778 def debugsetparents(ui, repo, rev1, rev2=None):
779 779 """manually set the parents of the current working directory
780 780
781 781 This is useful for writing repository conversion tools, but should
782 782 be used with care.
783 783 """
784 784
785 785 if not rev2:
786 786 rev2 = hex(nullid)
787 787
788 788 wlock = repo.wlock()
789 789 try:
790 790 repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(rev2))
791 791 finally:
792 792 del wlock
793 793
794 794 def debugstate(ui, repo, nodates=None):
795 795 """show the contents of the current dirstate"""
796 796 timestr = ""
797 797 showdate = not nodates
798 798 for file_, ent in util.sort(repo.dirstate._map.iteritems()):
799 799 if showdate:
800 800 if ent[3] == -1:
801 801 # Pad or slice to locale representation
802 802 locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(0)))
803 803 timestr = 'unset'
804 804 timestr = timestr[:locale_len] + ' '*(locale_len - len(timestr))
805 805 else:
806 806 timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(ent[3]))
807 807 if ent[1] & 020000:
808 808 mode = 'lnk'
809 809 else:
810 810 mode = '%3o' % (ent[1] & 0777)
811 811 ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
812 812 for f in repo.dirstate.copies():
813 813 ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
814 814
815 815 def debugdata(ui, file_, rev):
816 816 """dump the contents of a data file revision"""
817 817 r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_[:-2] + ".i")
818 818 try:
819 819 ui.write(r.revision(r.lookup(rev)))
820 820 except KeyError:
821 821 raise util.Abort(_('invalid revision identifier %s') % rev)
822 822
823 823 def debugdate(ui, date, range=None, **opts):
824 824 """parse and display a date"""
825 825 if opts["extended"]:
826 826 d = util.parsedate(date, util.extendeddateformats)
827 827 else:
828 828 d = util.parsedate(date)
829 829 ui.write("internal: %s %s\n" % d)
830 830 ui.write("standard: %s\n" % util.datestr(d))
831 831 if range:
832 832 m = util.matchdate(range)
833 833 ui.write("match: %s\n" % m(d[0]))
834 834
835 835 def debugindex(ui, file_):
836 836 """dump the contents of an index file"""
837 837 r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
838 838 ui.write(" rev offset length base linkrev"
839 839 " nodeid p1 p2\n")
840 840 for i in r:
841 841 node = r.node(i)
842 842 try:
843 843 pp = r.parents(node)
844 844 except:
845 845 pp = [nullid, nullid]
846 846 ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
847 847 i, r.start(i), r.length(i), r.base(i), r.linkrev(i),
848 848 short(node), short(pp[0]), short(pp[1])))
849 849
850 850 def debugindexdot(ui, file_):
851 851 """dump an index DAG as a .dot file"""
852 852 r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
853 853 ui.write("digraph G {\n")
854 854 for i in r:
855 855 node = r.node(i)
856 856 pp = r.parents(node)
857 857 ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
858 858 if pp[1] != nullid:
859 859 ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
860 860 ui.write("}\n")
861 861
862 862 def debuginstall(ui):
863 863 '''test Mercurial installation'''
864 864
865 865 def writetemp(contents):
866 866 (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
867 867 f = os.fdopen(fd, "wb")
868 868 f.write(contents)
869 869 f.close()
870 870 return name
871 871
872 872 problems = 0
873 873
874 874 # encoding
875 875 ui.status(_("Checking encoding (%s)...\n") % util._encoding)
876 876 try:
877 877 util.fromlocal("test")
878 878 except util.Abort, inst:
879 879 ui.write(" %s\n" % inst)
880 880 ui.write(_(" (check that your locale is properly set)\n"))
881 881 problems += 1
882 882
883 883 # compiled modules
884 884 ui.status(_("Checking extensions...\n"))
885 885 try:
886 886 import bdiff, mpatch, base85
887 887 except Exception, inst:
888 888 ui.write(" %s\n" % inst)
889 889 ui.write(_(" One or more extensions could not be found"))
890 890 ui.write(_(" (check that you compiled the extensions)\n"))
891 891 problems += 1
892 892
893 893 # templates
894 894 ui.status(_("Checking templates...\n"))
895 895 try:
896 896 import templater
897 t = templater.templater(templater.templatepath("map-cmdline.default"))
897 templater.templater(templater.templatepath("map-cmdline.default"))
898 898 except Exception, inst:
899 899 ui.write(" %s\n" % inst)
900 900 ui.write(_(" (templates seem to have been installed incorrectly)\n"))
901 901 problems += 1
902 902
903 903 # patch
904 904 ui.status(_("Checking patch...\n"))
905 905 patchproblems = 0
906 906 a = "1\n2\n3\n4\n"
907 907 b = "1\n2\n3\ninsert\n4\n"
908 908 fa = writetemp(a)
909 909 d = mdiff.unidiff(a, None, b, None, os.path.basename(fa),
910 910 os.path.basename(fa))
911 911 fd = writetemp(d)
912 912
913 913 files = {}
914 914 try:
915 915 patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files)
916 916 except util.Abort, e:
917 917 ui.write(_(" patch call failed:\n"))
918 918 ui.write(" " + str(e) + "\n")
919 919 patchproblems += 1
920 920 else:
921 921 if list(files) != [os.path.basename(fa)]:
922 922 ui.write(_(" unexpected patch output!\n"))
923 923 patchproblems += 1
924 924 a = file(fa).read()
925 925 if a != b:
926 926 ui.write(_(" patch test failed!\n"))
927 927 patchproblems += 1
928 928
929 929 if patchproblems:
930 930 if ui.config('ui', 'patch'):
931 931 ui.write(_(" (Current patch tool may be incompatible with patch,"
932 932 " or misconfigured. Please check your .hgrc file)\n"))
933 933 else:
934 934 ui.write(_(" Internal patcher failure, please report this error"
935 935 " to http://www.selenic.com/mercurial/bts\n"))
936 936 problems += patchproblems
937 937
938 938 os.unlink(fa)
939 939 os.unlink(fd)
940 940
941 941 # editor
942 942 ui.status(_("Checking commit editor...\n"))
943 943 editor = ui.geteditor()
944 944 cmdpath = util.find_exe(editor) or util.find_exe(editor.split()[0])
945 945 if not cmdpath:
946 946 if editor == 'vi':
947 947 ui.write(_(" No commit editor set and can't find vi in PATH\n"))
948 948 ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
949 949 else:
950 950 ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
951 951 ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
952 952 problems += 1
953 953
954 954 # check username
955 955 ui.status(_("Checking username...\n"))
956 956 user = os.environ.get("HGUSER")
957 957 if user is None:
958 958 user = ui.config("ui", "username")
959 959 if user is None:
960 960 user = os.environ.get("EMAIL")
961 961 if not user:
962 962 ui.warn(" ")
963 963 ui.username()
964 964 ui.write(_(" (specify a username in your .hgrc file)\n"))
965 965
966 966 if not problems:
967 967 ui.status(_("No problems detected\n"))
968 968 else:
969 969 ui.write(_("%s problems detected,"
970 970 " please check your install!\n") % problems)
971 971
972 972 return problems
973 973
974 974 def debugrename(ui, repo, file1, *pats, **opts):
975 975 """dump rename information"""
976 976
977 977 ctx = repo[opts.get('rev')]
978 978 m = cmdutil.match(repo, (file1,) + pats, opts)
979 979 for abs in ctx.walk(m):
980 980 fctx = ctx[abs]
981 981 o = fctx.filelog().renamed(fctx.filenode())
982 982 rel = m.rel(abs)
983 983 if o:
984 984 ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
985 985 else:
986 986 ui.write(_("%s not renamed\n") % rel)
987 987
988 988 def debugwalk(ui, repo, *pats, **opts):
989 989 """show how files match on given patterns"""
990 990 m = cmdutil.match(repo, pats, opts)
991 991 items = list(repo.walk(m))
992 992 if not items:
993 993 return
994 994 fmt = 'f %%-%ds %%-%ds %%s' % (
995 995 max([len(abs) for abs in items]),
996 996 max([len(m.rel(abs)) for abs in items]))
997 997 for abs in items:
998 998 line = fmt % (abs, m.rel(abs), m.exact(abs) and 'exact' or '')
999 999 ui.write("%s\n" % line.rstrip())
1000 1000
1001 1001 def diff(ui, repo, *pats, **opts):
1002 1002 """diff repository (or selected files)
1003 1003
1004 1004 Show differences between revisions for the specified files.
1005 1005
1006 1006 Differences between files are shown using the unified diff format.
1007 1007
1008 1008 NOTE: diff may generate unexpected results for merges, as it will
1009 1009 default to comparing against the working directory's first parent
1010 1010 changeset if no revisions are specified.
1011 1011
1012 1012 When two revision arguments are given, then changes are shown
1013 1013 between those revisions. If only one revision is specified then
1014 1014 that revision is compared to the working directory, and, when no
1015 1015 revisions are specified, the working directory files are compared
1016 1016 to its parent.
1017 1017
1018 1018 Without the -a option, diff will avoid generating diffs of files
1019 1019 it detects as binary. With -a, diff will generate a diff anyway,
1020 1020 probably with undesirable results.
1021 1021
1022 1022 Use the --git option to generate diffs in the git extended diff
1023 1023 format. For more information, read hg help diffs.
1024 1024 """
1025 1025
1026 1026 revs = opts.get('rev')
1027 1027 change = opts.get('change')
1028 1028
1029 1029 if revs and change:
1030 1030 msg = _('cannot specify --rev and --change at the same time')
1031 1031 raise util.Abort(msg)
1032 1032 elif change:
1033 1033 node2 = repo.lookup(change)
1034 1034 node1 = repo[node2].parents()[0].node()
1035 1035 else:
1036 1036 node1, node2 = cmdutil.revpair(repo, revs)
1037 1037
1038 1038 m = cmdutil.match(repo, pats, opts)
1039 1039 it = patch.diff(repo, node1, node2, match=m, opts=patch.diffopts(ui, opts))
1040 1040 for chunk in it:
1041 1041 repo.ui.write(chunk)
1042 1042
1043 1043 def export(ui, repo, *changesets, **opts):
1044 1044 """dump the header and diffs for one or more changesets
1045 1045
1046 1046 Print the changeset header and diffs for one or more revisions.
1047 1047
1048 1048 The information shown in the changeset header is: author,
1049 1049 changeset hash, parent(s) and commit comment.
1050 1050
1051 1051 NOTE: export may generate unexpected diff output for merge changesets,
1052 1052 as it will compare the merge changeset against its first parent only.
1053 1053
1054 1054 Output may be to a file, in which case the name of the file is
1055 1055 given using a format string. The formatting rules are as follows:
1056 1056
1057 1057 %% literal "%" character
1058 1058 %H changeset hash (40 bytes of hexadecimal)
1059 1059 %N number of patches being generated
1060 1060 %R changeset revision number
1061 1061 %b basename of the exporting repository
1062 1062 %h short-form changeset hash (12 bytes of hexadecimal)
1063 1063 %n zero-padded sequence number, starting at 1
1064 1064 %r zero-padded changeset revision number
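
    For instance, a hypothetical invocation (revision numbers are
    placeholders) that writes one patch file per revision, named after
    its revision number and short hash:

      hg export -o "%R-%h.patch" 100 101 102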
1065 1065
1066 1066 Without the -a option, export will avoid generating diffs of files
1067 1067 it detects as binary. With -a, export will generate a diff anyway,
1068 1068 probably with undesirable results.
1069 1069
1070 1070 Use the --git option to generate diffs in the git extended diff
1071 1071 format. Read the diffs help topic for more information.
1072 1072
1073 1073 With the --switch-parent option, the diff will be against the second
1074 1074 parent. It can be useful to review a merge.
1075 1075 """
1076 1076 if not changesets:
1077 1077 raise util.Abort(_("export requires at least one changeset"))
1078 1078 revs = cmdutil.revrange(repo, changesets)
1079 1079 if len(revs) > 1:
1080 1080 ui.note(_('exporting patches:\n'))
1081 1081 else:
1082 1082 ui.note(_('exporting patch:\n'))
1083 1083 patch.export(repo, revs, template=opts.get('output'),
1084 1084 switch_parent=opts.get('switch_parent'),
1085 1085 opts=patch.diffopts(ui, opts))
1086 1086
1087 1087 def grep(ui, repo, pattern, *pats, **opts):
1088 1088 """search for a pattern in specified files and revisions
1089 1089
1090 1090 Search revisions of files for a regular expression.
1091 1091
1092 1092 This command behaves differently than Unix grep. It only accepts
1093 1093 Python/Perl regexps. It searches repository history, not the
1094 1094 working directory. It always prints the revision number in which
1095 1095 a match appears.
1096 1096
1097 1097 By default, grep only prints output for the first revision of a
1098 1098 file in which it finds a match. To get it to print every revision
1099 1099 that contains a change in match status ("-" for a match that
1100 1100 becomes a non-match, or "+" for a non-match that becomes a match),
1101 1101 use the --all flag.
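
    A hypothetical example (the pattern is a placeholder), listing every
    revision in which matches appeared or disappeared, with line numbers:

      hg grep --all -n some_pattern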
1102 1102 """
1103 1103 reflags = 0
1104 1104 if opts.get('ignore_case'):
1105 1105 reflags |= re.I
1106 1106 try:
1107 1107 regexp = re.compile(pattern, reflags)
1108 1108 except Exception, inst:
1109 1109 ui.warn(_("grep: invalid match pattern: %s\n") % inst)
1110 1110 return None
1111 1111 sep, eol = ':', '\n'
1112 1112 if opts.get('print0'):
1113 1113 sep = eol = '\0'
1114 1114
1115 1115 fcache = {}
1116 1116 def getfile(fn):
1117 1117 if fn not in fcache:
1118 1118 fcache[fn] = repo.file(fn)
1119 1119 return fcache[fn]
1120 1120
1121 1121 def matchlines(body):
1122 1122 begin = 0
1123 1123 linenum = 0
1124 1124 while True:
1125 1125 match = regexp.search(body, begin)
1126 1126 if not match:
1127 1127 break
1128 1128 mstart, mend = match.span()
1129 1129 linenum += body.count('\n', begin, mstart) + 1
1130 1130 lstart = body.rfind('\n', begin, mstart) + 1 or begin
1131 1131 begin = body.find('\n', mend) + 1 or len(body)
1132 1132 lend = begin - 1
1133 1133 yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
1134 1134
1135 1135 class linestate(object):
1136 1136 def __init__(self, line, linenum, colstart, colend):
1137 1137 self.line = line
1138 1138 self.linenum = linenum
1139 1139 self.colstart = colstart
1140 1140 self.colend = colend
1141 1141
1142 1142 def __hash__(self):
1143 1143 return hash((self.linenum, self.line))
1144 1144
1145 1145 def __eq__(self, other):
1146 1146 return self.line == other.line
1147 1147
1148 1148 matches = {}
1149 1149 copies = {}
1150 1150 def grepbody(fn, rev, body):
1151 1151 matches[rev].setdefault(fn, [])
1152 1152 m = matches[rev][fn]
1153 1153 for lnum, cstart, cend, line in matchlines(body):
1154 1154 s = linestate(line, lnum, cstart, cend)
1155 1155 m.append(s)
1156 1156
1157 1157 def difflinestates(a, b):
1158 1158 sm = difflib.SequenceMatcher(None, a, b)
1159 1159 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
1160 1160 if tag == 'insert':
1161 1161 for i in xrange(blo, bhi):
1162 1162 yield ('+', b[i])
1163 1163 elif tag == 'delete':
1164 1164 for i in xrange(alo, ahi):
1165 1165 yield ('-', a[i])
1166 1166 elif tag == 'replace':
1167 1167 for i in xrange(alo, ahi):
1168 1168 yield ('-', a[i])
1169 1169 for i in xrange(blo, bhi):
1170 1170 yield ('+', b[i])
1171 1171
1172 1172 prev = {}
1173 1173 def display(fn, rev, states, prevstates):
1174 1174 datefunc = ui.quiet and util.shortdate or util.datestr
1175 1175 found = False
1176 1176 filerevmatches = {}
1177 1177 r = prev.get(fn, -1)
1178 1178 if opts.get('all'):
1179 1179 iter = difflinestates(states, prevstates)
1180 1180 else:
1181 1181 iter = [('', l) for l in prevstates]
1182 1182 for change, l in iter:
1183 1183 cols = [fn, str(r)]
1184 1184 if opts.get('line_number'):
1185 1185 cols.append(str(l.linenum))
1186 1186 if opts.get('all'):
1187 1187 cols.append(change)
1188 1188 if opts.get('user'):
1189 1189 cols.append(ui.shortuser(get(r)[1]))
1190 1190 if opts.get('date'):
1191 1191 cols.append(datefunc(get(r)[2]))
1192 1192 if opts.get('files_with_matches'):
1193 1193 c = (fn, r)
1194 1194 if c in filerevmatches:
1195 1195 continue
1196 1196 filerevmatches[c] = 1
1197 1197 else:
1198 1198 cols.append(l.line)
1199 1199 ui.write(sep.join(cols), eol)
1200 1200 found = True
1201 1201 return found
1202 1202
1203 1203 fstate = {}
1204 1204 skip = {}
1205 1205 get = util.cachefunc(lambda r: repo[r].changeset())
1206 1206 changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
1207 1207 found = False
1208 1208 follow = opts.get('follow')
1209 1209 for st, rev, fns in changeiter:
1210 1210 if st == 'window':
1211 1211 matches.clear()
1212 1212 elif st == 'add':
1213 1213 ctx = repo[rev]
1214 1214 matches[rev] = {}
1215 1215 for fn in fns:
1216 1216 if fn in skip:
1217 1217 continue
1218 1218 try:
1219 1219 grepbody(fn, rev, getfile(fn).read(ctx.filenode(fn)))
1220 1220 fstate.setdefault(fn, [])
1221 1221 if follow:
1222 1222 copied = getfile(fn).renamed(ctx.filenode(fn))
1223 1223 if copied:
1224 1224 copies.setdefault(rev, {})[fn] = copied[0]
1225 1225 except error.LookupError:
1226 1226 pass
1227 1227 elif st == 'iter':
1228 1228 for fn, m in util.sort(matches[rev].items()):
1229 1229 copy = copies.get(rev, {}).get(fn)
1230 1230 if fn in skip:
1231 1231 if copy:
1232 1232 skip[copy] = True
1233 1233 continue
1234 1234 if fn in prev or fstate[fn]:
1235 1235 r = display(fn, rev, m, fstate[fn])
1236 1236 found = found or r
1237 1237 if r and not opts.get('all'):
1238 1238 skip[fn] = True
1239 1239 if copy:
1240 1240 skip[copy] = True
1241 1241 fstate[fn] = m
1242 1242 if copy:
1243 1243 fstate[copy] = m
1244 1244 prev[fn] = rev
1245 1245
1246 1246 for fn, state in util.sort(fstate.items()):
1247 1247 if fn in skip:
1248 1248 continue
1249 1249 if fn not in copies.get(prev[fn], {}):
1250 1250 found = display(fn, rev, {}, state) or found
1251 1251 return (not found and 1) or 0
1252 1252
1253 1253 def heads(ui, repo, *branchrevs, **opts):
1254 1254 """show current repository heads or show branch heads
1255 1255
1256 1256 With no arguments, show all repository head changesets.
1257 1257
1258 1258 If branch or revision names are given, this will show the heads of
1259 1259 the specified branches or the branches those revisions are tagged
1260 1260 with.
1261 1261
1262 1262 Repository "heads" are changesets that don't have child
1263 1263 changesets. They are where development generally takes place and
1264 1264 are the usual targets for update and merge operations.
1265 1265
1266 1266 Branch heads are changesets that have a given branch tag, but have
1267 1267 no child changesets with that tag. They are usually where
1268 1268 development on the given branch takes place.
1269 1269 """
1270 1270 if opts.get('rev'):
1271 1271 start = repo.lookup(opts['rev'])
1272 1272 else:
1273 1273 start = None
1274 1274 closed = not opts.get('active')
1275 1275 if not branchrevs:
1276 1276 # Assume we're looking for repo-wide heads if no revs were specified.
1277 1277 heads = repo.heads(start, closed=closed)
1278 1278 else:
1279 1279 heads = []
1280 1280 visitedset = util.set()
1281 1281 for branchrev in branchrevs:
1282 1282 branch = repo[branchrev].branch()
1283 1283 if branch in visitedset:
1284 1284 continue
1285 1285 visitedset.add(branch)
1286 1286 bheads = repo.branchheads(branch, start, closed=closed)
1287 1287 if not bheads:
1288 1288 if branch != branchrev:
1289 1289 ui.warn(_("no changes on branch %s containing %s are "
1290 1290 "reachable from %s\n")
1291 1291 % (branch, branchrev, opts.get('rev')))
1292 1292 else:
1293 1293 ui.warn(_("no changes on branch %s are reachable from %s\n")
1294 1294 % (branch, opts.get('rev')))
1295 1295 heads.extend(bheads)
1296 1296 if not heads:
1297 1297 return 1
1298 1298 displayer = cmdutil.show_changeset(ui, repo, opts)
1299 1299 for n in heads:
1300 1300 displayer.show(repo[n])
1301 1301
1302 1302 def help_(ui, name=None, with_version=False):
1303 1303 """show help for a given topic or a help overview
1304 1304
1305 1305 With no arguments, print a list of commands and short help.
1306 1306
1307 1307 Given a topic, extension, or command name, print help for that topic."""
1308 1308 option_lists = []
1309 1309
1310 1310 def addglobalopts(aliases):
1311 1311 if ui.verbose:
1312 1312 option_lists.append((_("global options:"), globalopts))
1313 1313 if name == 'shortlist':
1314 1314 option_lists.append((_('use "hg help" for the full list '
1315 1315 'of commands'), ()))
1316 1316 else:
1317 1317 if name == 'shortlist':
1318 1318 msg = _('use "hg help" for the full list of commands '
1319 1319 'or "hg -v" for details')
1320 1320 elif aliases:
1321 1321 msg = _('use "hg -v help%s" to show aliases and '
1322 1322 'global options') % (name and " " + name or "")
1323 1323 else:
1324 1324 msg = _('use "hg -v help %s" to show global options') % name
1325 1325 option_lists.append((msg, ()))
1326 1326
1327 1327 def helpcmd(name):
1328 1328 if with_version:
1329 1329 version_(ui)
1330 1330 ui.write('\n')
1331 1331
1332 1332 try:
1333 1333 aliases, i = cmdutil.findcmd(name, table, False)
1334 1334 except error.AmbiguousCommand, inst:
1335 1335 select = lambda c: c.lstrip('^').startswith(inst.args[0])
1336 1336 helplist(_('list of commands:\n\n'), select)
1337 1337 return
1338 1338
1339 1339 # synopsis
1340 1340 if len(i) > 2:
1341 1341 if i[2].startswith('hg'):
1342 1342 ui.write("%s\n" % i[2])
1343 1343 else:
1344 1344 ui.write('hg %s %s\n' % (aliases[0], i[2]))
1345 1345 else:
1346 1346 ui.write('hg %s\n' % aliases[0])
1347 1347
1348 1348 # aliases
1349 1349 if not ui.quiet and len(aliases) > 1:
1350 1350 ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))
1351 1351
1352 1352 # description
1353 1353 doc = gettext(i[0].__doc__)
1354 1354 if not doc:
1355 1355 doc = _("(no help text available)")
1356 1356 if ui.quiet:
1357 1357 doc = doc.splitlines(0)[0]
1358 1358 ui.write("\n%s\n" % doc.rstrip())
1359 1359
1360 1360 if not ui.quiet:
1361 1361 # options
1362 1362 if i[1]:
1363 1363 option_lists.append((_("options:\n"), i[1]))
1364 1364
1365 1365 addglobalopts(False)
1366 1366
1367 1367 def helplist(header, select=None):
1368 1368 h = {}
1369 1369 cmds = {}
1370 1370 for c, e in table.iteritems():
1371 1371 f = c.split("|", 1)[0]
1372 1372 if select and not select(f):
1373 1373 continue
1374 1374 if (not select and name != 'shortlist' and
1375 1375 e[0].__module__ != __name__):
1376 1376 continue
1377 1377 if name == "shortlist" and not f.startswith("^"):
1378 1378 continue
1379 1379 f = f.lstrip("^")
1380 1380 if not ui.debugflag and f.startswith("debug"):
1381 1381 continue
1382 1382 doc = gettext(e[0].__doc__)
1383 1383 if not doc:
1384 1384 doc = _("(no help text available)")
1385 1385 h[f] = doc.splitlines(0)[0].rstrip()
1386 1386 cmds[f] = c.lstrip("^")
1387 1387
1388 1388 if not h:
1389 1389 ui.status(_('no commands defined\n'))
1390 1390 return
1391 1391
1392 1392 ui.status(header)
1393 1393 fns = util.sort(h)
1394 1394 m = max(map(len, fns))
1395 1395 for f in fns:
1396 1396 if ui.verbose:
1397 1397 commands = cmds[f].replace("|",", ")
1398 1398 ui.write(" %s:\n %s\n"%(commands, h[f]))
1399 1399 else:
1400 1400 ui.write(' %-*s %s\n' % (m, f, h[f]))
1401 1401
1402 1402 exts = list(extensions.extensions())
1403 1403 if exts and name != 'shortlist':
1404 1404 ui.write(_('\nenabled extensions:\n\n'))
1405 1405 maxlength = 0
1406 1406 exthelps = []
1407 1407 for ename, ext in exts:
1408 1408 doc = (ext.__doc__ or _('(no help text available)'))
1409 1409 ename = ename.split('.')[-1]
1410 1410 maxlength = max(len(ename), maxlength)
1411 1411 exthelps.append((ename, doc.splitlines(0)[0].strip()))
1412 1412 for ename, text in exthelps:
1413 1413 ui.write(_(' %s %s\n') % (ename.ljust(maxlength), text))
1414 1414
1415 1415 if not ui.quiet:
1416 1416 addglobalopts(True)
1417 1417
1418 1418 def helptopic(name):
1419 1419 for names, header, doc in help.helptable:
1420 1420 if name in names:
1421 1421 break
1422 1422 else:
1423 1423 raise error.UnknownCommand(name)
1424 1424
1425 1425 # description
1426 1426 if not doc:
1427 1427 doc = _("(no help text available)")
1428 1428 if callable(doc):
1429 1429 doc = doc()
1430 1430
1431 1431 ui.write("%s\n" % header)
1432 1432 ui.write("%s\n" % doc.rstrip())
1433 1433
1434 1434 def helpext(name):
1435 1435 try:
1436 1436 mod = extensions.find(name)
1437 1437 except KeyError:
1438 1438 raise error.UnknownCommand(name)
1439 1439
1440 1440 doc = gettext(mod.__doc__) or _('no help text available')
1441 1441 doc = doc.splitlines(0)
1442 1442 ui.write(_('%s extension - %s\n') % (name.split('.')[-1], doc[0]))
1443 1443 for d in doc[1:]:
1444 1444 ui.write(d, '\n')
1445 1445
1446 1446 ui.status('\n')
1447 1447
1448 1448 try:
1449 1449 ct = mod.cmdtable
1450 1450 except AttributeError:
1451 1451 ct = {}
1452 1452
1453 1453 modcmds = dict.fromkeys([c.split('|', 1)[0] for c in ct])
1454 1454 helplist(_('list of commands:\n\n'), modcmds.has_key)
1455 1455
1456 1456 if name and name != 'shortlist':
1457 1457 i = None
1458 1458 for f in (helptopic, helpcmd, helpext):
1459 1459 try:
1460 1460 f(name)
1461 1461 i = None
1462 1462 break
1463 1463 except error.UnknownCommand, inst:
1464 1464 i = inst
1465 1465 if i:
1466 1466 raise i
1467 1467
1468 1468 else:
1469 1469 # program name
1470 1470 if ui.verbose or with_version:
1471 1471 version_(ui)
1472 1472 else:
1473 1473 ui.status(_("Mercurial Distributed SCM\n"))
1474 1474 ui.status('\n')
1475 1475
1476 1476 # list of commands
1477 1477 if name == "shortlist":
1478 1478 header = _('basic commands:\n\n')
1479 1479 else:
1480 1480 header = _('list of commands:\n\n')
1481 1481
1482 1482 helplist(header)
1483 1483
1484 1484 # list all option lists
1485 1485 opt_output = []
1486 1486 for title, options in option_lists:
1487 1487 opt_output.append(("\n%s" % title, None))
1488 1488 for shortopt, longopt, default, desc in options:
1489 1489 if "DEPRECATED" in desc and not ui.verbose: continue
1490 1490 opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
1491 1491 longopt and " --%s" % longopt),
1492 1492 "%s%s" % (desc,
1493 1493 default
1494 1494 and _(" (default: %s)") % default
1495 1495 or "")))
1496 1496
1497 1497 if not name:
1498 1498 ui.write(_("\nadditional help topics:\n\n"))
1499 1499 topics = []
1500 1500 for names, header, doc in help.helptable:
1501 1501 names = [(-len(name), name) for name in names]
1502 1502 names.sort()
1503 1503 topics.append((names[0][1], header))
1504 1504 topics_len = max([len(s[0]) for s in topics])
1505 1505 for t, desc in topics:
1506 1506 ui.write(" %-*s %s\n" % (topics_len, t, desc))
1507 1507
1508 1508 if opt_output:
1509 1509 opts_len = max([len(line[0]) for line in opt_output if line[1]] or [0])
1510 1510 for first, second in opt_output:
1511 1511 if second:
1512 1512 ui.write(" %-*s %s\n" % (opts_len, first, second))
1513 1513 else:
1514 1514 ui.write("%s\n" % first)
1515 1515
1516 1516 def identify(ui, repo, source=None,
1517 1517 rev=None, num=None, id=None, branch=None, tags=None):
1518 1518 """identify the working copy or specified revision
1519 1519
1520 1520 With no revision, print a summary of the current state of the repo.
1521 1521
1522 1522 With a path, do a lookup in another repository.
1523 1523
1524 1524 This summary identifies the repository state using one or two parent
1525 1525 hash identifiers, followed by a "+" if there are uncommitted changes
1526 1526 in the working directory, a list of tags for this revision, and a
1527 1527 branch name for non-default branches.
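
    A hypothetical run on a repository with uncommitted local changes
    might print something like (the hash is made up):

      $ hg identify
      a1b2c3d4e5f6+ tip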
1528 1528 """
1529 1529
1530 1530 if not repo and not source:
1531 1531 raise util.Abort(_("There is no Mercurial repository here "
1532 1532 "(.hg not found)"))
1533 1533
1534 1534 hexfunc = ui.debugflag and hex or short
1535 1535 default = not (num or id or branch or tags)
1536 1536 output = []
1537 1537
1538 1538 revs = []
1539 1539 if source:
1540 1540 source, revs, checkout = hg.parseurl(ui.expandpath(source), [])
1541 1541 repo = hg.repository(ui, source)
1542 1542
1543 1543 if not repo.local():
1544 1544 if not rev and revs:
1545 1545 rev = revs[0]
1546 1546 if not rev:
1547 1547 rev = "tip"
1548 1548 if num or branch or tags:
1549 1549 raise util.Abort(
1550 1550 "can't query remote revision number, branch, or tags")
1551 1551 output = [hexfunc(repo.lookup(rev))]
1552 1552 elif not rev:
1553 1553 ctx = repo[None]
1554 1554 parents = ctx.parents()
1555 1555 changed = False
1556 1556 if default or id or num:
1557 1557 changed = ctx.files() + ctx.deleted()
1558 1558 if default or id:
1559 1559 output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]),
1560 1560 (changed) and "+" or "")]
1561 1561 if num:
1562 1562 output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]),
1563 1563 (changed) and "+" or ""))
1564 1564 else:
1565 1565 ctx = repo[rev]
1566 1566 if default or id:
1567 1567 output = [hexfunc(ctx.node())]
1568 1568 if num:
1569 1569 output.append(str(ctx.rev()))
1570 1570
1571 1571 if repo.local() and default and not ui.quiet:
1572 1572 b = util.tolocal(ctx.branch())
1573 1573 if b != 'default':
1574 1574 output.append("(%s)" % b)
1575 1575
1576 1576 # multiple tags for a single parent separated by '/'
1577 1577 t = "/".join(ctx.tags())
1578 1578 if t:
1579 1579 output.append(t)
1580 1580
1581 1581 if branch:
1582 1582 output.append(util.tolocal(ctx.branch()))
1583 1583
1584 1584 if tags:
1585 1585 output.extend(ctx.tags())
1586 1586
1587 1587 ui.write("%s\n" % ' '.join(output))
1588 1588
1589 1589 def import_(ui, repo, patch1, *patches, **opts):
1590 1590 """import an ordered set of patches
1591 1591
1592 1592 Import a list of patches and commit them individually.
1593 1593
1594 1594 If there are outstanding changes in the working directory, import
1595 1595 will abort unless given the -f flag.
1596 1596
1597 1597 You can import a patch straight from a mail message. Even patches
1598 1598 sent as attachments work (the body part must be of type text/plain
1599 1599 or text/x-patch). The From and Subject headers of the email message
1600 1600 are used as the default committer and commit message. All text/plain
1601 1601 body parts before the first diff are added to the commit
1602 1602 message.
1603 1603
1604 1604 If the imported patch was generated by hg export, user and description
1605 1605 from patch override values from message headers and body. Values
1606 1606 given on command line with -m and -u override these.
1607 1607
1608 1608 If --exact is specified, import will set the working directory
1609 1609 to the parent of each patch before applying it, and will abort
1610 1610 if the resulting changeset has a different ID than the one
1611 1611 recorded in the patch. This may happen due to character set
1612 1612 problems or other deficiencies in the text patch format.
1613 1613
1614 1614 With --similarity, hg will attempt to discover renames and copies
1615 1615 in the patch in the same way as 'addremove'.
1616 1616
1617 1617 To read a patch from standard input, use patch name "-".
1618 1618 See 'hg help dates' for a list of formats valid for -d/--date.
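
    For example (file names are placeholders), applying one patch from a
    file and another from standard input:

      hg import fix-encoding.patch
      hg import - < fix-dates.patch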
1619 1619 """
1620 1620 patches = (patch1,) + patches
1621 1621
1622 1622 date = opts.get('date')
1623 1623 if date:
1624 1624 opts['date'] = util.parsedate(date)
1625 1625
1626 1626 try:
1627 1627 sim = float(opts.get('similarity') or 0)
1628 1628 except ValueError:
1629 1629 raise util.Abort(_('similarity must be a number'))
1630 1630 if sim < 0 or sim > 100:
1631 1631 raise util.Abort(_('similarity must be between 0 and 100'))
1632 1632
1633 1633 if opts.get('exact') or not opts.get('force'):
1634 1634 cmdutil.bail_if_changed(repo)
1635 1635
1636 1636 d = opts["base"]
1637 1637 strip = opts["strip"]
1638 1638 wlock = lock = None
1639 1639 try:
1640 1640 wlock = repo.wlock()
1641 1641 lock = repo.lock()
1642 1642 for p in patches:
1643 1643 pf = os.path.join(d, p)
1644 1644
1645 1645 if pf == '-':
1646 1646 ui.status(_("applying patch from stdin\n"))
1647 1647 pf = sys.stdin
1648 1648 else:
1649 1649 ui.status(_("applying %s\n") % p)
1650 1650 pf = url.open(ui, pf)
1651 1651 data = patch.extract(ui, pf)
1652 1652 tmpname, message, user, date, branch, nodeid, p1, p2 = data
1653 1653
1654 1654 if tmpname is None:
1655 1655 raise util.Abort(_('no diffs found'))
1656 1656
1657 1657 try:
1658 1658 cmdline_message = cmdutil.logmessage(opts)
1659 1659 if cmdline_message:
1660 1660 # pickup the cmdline msg
1661 1661 message = cmdline_message
1662 1662 elif message:
1663 1663 # pickup the patch msg
1664 1664 message = message.strip()
1665 1665 else:
1666 1666 # launch the editor
1667 1667 message = None
1668 1668 ui.debug(_('message:\n%s\n') % message)
1669 1669
1670 1670 wp = repo.parents()
1671 1671 if opts.get('exact'):
1672 1672 if not nodeid or not p1:
1673 1673 raise util.Abort(_('not a mercurial patch'))
1674 1674 p1 = repo.lookup(p1)
1675 1675 p2 = repo.lookup(p2 or hex(nullid))
1676 1676
1677 1677 if p1 != wp[0].node():
1678 1678 hg.clean(repo, p1)
1679 1679 repo.dirstate.setparents(p1, p2)
1680 1680 elif p2:
1681 1681 try:
1682 1682 p1 = repo.lookup(p1)
1683 1683 p2 = repo.lookup(p2)
1684 1684 if p1 == wp[0].node():
1685 1685 repo.dirstate.setparents(p1, p2)
1686 1686 except error.RepoError:
1687 1687 pass
1688 1688 if opts.get('exact') or opts.get('import_branch'):
1689 1689 repo.dirstate.setbranch(branch or 'default')
1690 1690
1691 1691 files = {}
1692 1692 try:
1693 fuzz = patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
1694 files=files)
1693 patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
1694 files=files)
1695 1695 finally:
1696 1696 files = patch.updatedir(ui, repo, files, similarity=sim/100.)
1697 1697 if not opts.get('no_commit'):
1698 1698 n = repo.commit(files, message, opts.get('user') or user,
1699 1699 opts.get('date') or date)
1700 1700 if opts.get('exact'):
1701 1701 if hex(n) != nodeid:
1702 1702 repo.rollback()
1703 1703 raise util.Abort(_('patch is damaged'
1704 1704 ' or loses information'))
1705 1705 # Force a dirstate write so that the next transaction
1706 1706 # backs up an up-to-date file.
1707 1707 repo.dirstate.write()
1708 1708 finally:
1709 1709 os.unlink(tmpname)
1710 1710 finally:
1711 1711 del lock, wlock
1712 1712
1713 1713 def incoming(ui, repo, source="default", **opts):
1714 1714 """show new changesets found in source
1715 1715
1716 1716 Show new changesets found in the specified path/URL or the default
1717 1717 pull location. These are the changesets that would be pulled if a pull
1718 1718 was requested.
1719 1719
1720 1720 For a remote repository, using --bundle avoids downloading the
1721 1721 changesets twice if the incoming command is followed by a pull.
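
    For example, a hypothetical sequence (the bundle name is a
    placeholder) that saves the incoming changesets to a bundle and then
    pulls from it without contacting the remote again:

      hg incoming --bundle incoming.hg
      hg pull incoming.hg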
1722 1722
1723 1723 See pull for valid source format details.
1724 1724 """
1725 1725 limit = cmdutil.loglimit(opts)
1726 1726 source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
1727 1727 cmdutil.setremoteconfig(ui, opts)
1728 1728
1729 1729 other = hg.repository(ui, source)
1730 1730 ui.status(_('comparing with %s\n') % url.hidepassword(source))
1731 1731 if revs:
1732 1732 revs = [other.lookup(rev) for rev in revs]
1733 1733 common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
1734 1734 force=opts["force"])
1735 1735 if not incoming:
1736 1736 try:
1737 1737 os.unlink(opts["bundle"])
1738 1738 except:
1739 1739 pass
1740 1740 ui.status(_("no changes found\n"))
1741 1741 return 1
1742 1742
1743 1743 cleanup = None
1744 1744 try:
1745 1745 fname = opts["bundle"]
1746 1746 if fname or not other.local():
1747 1747 # create a bundle (uncompressed if other repo is not local)
1748 1748
1749 1749 if revs is None and other.capable('changegroupsubset'):
1750 1750 revs = rheads
1751 1751
1752 1752 if revs is None:
1753 1753 cg = other.changegroup(incoming, "incoming")
1754 1754 else:
1755 1755 cg = other.changegroupsubset(incoming, revs, 'incoming')
1756 1756 bundletype = other.local() and "HG10BZ" or "HG10UN"
1757 1757 fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
1758 1758 # keep written bundle?
1759 1759 if opts["bundle"]:
1760 1760 cleanup = None
1761 1761 if not other.local():
1762 1762 # use the created uncompressed bundlerepo
1763 1763 other = bundlerepo.bundlerepository(ui, repo.root, fname)
1764 1764
1765 1765 o = other.changelog.nodesbetween(incoming, revs)[0]
1766 1766 if opts.get('newest_first'):
1767 1767 o.reverse()
1768 1768 displayer = cmdutil.show_changeset(ui, other, opts)
1769 1769 count = 0
1770 1770 for n in o:
1771 1771 if count >= limit:
1772 1772 break
1773 1773 parents = [p for p in other.changelog.parents(n) if p != nullid]
1774 1774 if opts.get('no_merges') and len(parents) == 2:
1775 1775 continue
1776 1776 count += 1
1777 1777 displayer.show(other[n])
1778 1778 finally:
1779 1779 if hasattr(other, 'close'):
1780 1780 other.close()
1781 1781 if cleanup:
1782 1782 os.unlink(cleanup)
1783 1783
1784 1784 def init(ui, dest=".", **opts):
1785 1785 """create a new repository in the given directory
1786 1786
1787 1787 Initialize a new repository in the given directory. If the given
1788 1788 directory does not exist, it is created.
1789 1789
1790 1790 If no directory is given, the current directory is used.
1791 1791
1792 1792 It is possible to specify an ssh:// URL as the destination.
1793 1793 See 'hg help urls' for more information.
1794 1794 """
1795 1795 cmdutil.setremoteconfig(ui, opts)
1796 1796 hg.repository(ui, dest, create=1)
1797 1797
1798 1798 def locate(ui, repo, *pats, **opts):
1799 1799 """locate files matching specific patterns
1800 1800
1801 1801 Print all files under Mercurial control whose names match the
1802 1802 given patterns.
1803 1803
1804 1804 This command searches the entire repository by default. To search
1805 1805 just the current directory and its subdirectories, use
1806 1806 "--include .".
1807 1807
1808 1808 If no patterns are given to match, this command prints all file
1809 1809 names.
1810 1810
1811 1811 If you want to feed the output of this command into the "xargs"
1812 1812 command, use the "-0" option to both this command and "xargs".
1813 1813 This will avoid the problem of "xargs" treating single filenames
1814 1814 that contain white space as multiple filenames.
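
    For example, a hypothetical pipeline counting lines in every tracked
    Python file, safe even for names containing white space:

      hg locate -0 "*.py" | xargs -0 wc -l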
1815 1815 """
1816 1816 end = opts.get('print0') and '\0' or '\n'
1817 1817 rev = opts.get('rev') or None
1818 1818
1819 1819 ret = 1
1820 1820 m = cmdutil.match(repo, pats, opts, default='relglob')
1821 1821 m.bad = lambda x,y: False
1822 1822 for abs in repo[rev].walk(m):
1823 1823 if not rev and abs not in repo.dirstate:
1824 1824 continue
1825 1825 if opts.get('fullpath'):
1826 1826 ui.write(repo.wjoin(abs), end)
1827 1827 else:
1828 1828 ui.write(((pats and m.rel(abs)) or abs), end)
1829 1829 ret = 0
1830 1830
1831 1831 return ret
1832 1832
1833 1833 def log(ui, repo, *pats, **opts):
1834 1834 """show revision history of entire repository or files
1835 1835
1836 1836 Print the revision history of the specified files or the entire
1837 1837 project.
1838 1838
1839 1839 File history is shown without following rename or copy history of
1840 1840 files. Use -f/--follow with a file name to follow history across
1841 1841 renames and copies. --follow without a file name will only show
1842 1842 ancestors or descendants of the starting revision. --follow-first
1843 1843 only follows the first parent of merge revisions.
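
    For example (the file name is a placeholder), to trace a file's
    history across renames:

      hg log -f old-name.c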
1844 1844
1845 1845 If no revision range is specified, the default is tip:0 unless
1846 1846 --follow is set, in which case the working directory parent is
1847 1847 used as the starting revision.
1848 1848
1849 1849 See 'hg help dates' for a list of formats valid for -d/--date.
1850 1850
1851 1851 By default this command outputs: changeset id and hash, tags,
1852 1852 non-trivial parents, user, date and time, and a summary for each
1853 1853 commit. When the -v/--verbose switch is used, the list of changed
1854 1854 files and full commit message is shown.
1855 1855
1856 1856 NOTE: log -p may generate unexpected diff output for merge
1857 1857 changesets, as it will only compare the merge changeset against
1858 1858 its first parent. Also, the files: list will only reflect files
1859 1859 that are different from BOTH parents.
1860 1860
1861 1861 """
1862 1862
1863 1863 get = util.cachefunc(lambda r: repo[r].changeset())
1864 1864 changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
1865 1865
1866 1866 limit = cmdutil.loglimit(opts)
1867 1867 count = 0
1868 1868
1869 1869 if opts.get('copies') and opts.get('rev'):
1870 1870 endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1
1871 1871 else:
1872 1872 endrev = len(repo)
1873 1873 rcache = {}
1874 1874 ncache = {}
1875 1875 def getrenamed(fn, rev):
1876 1876 '''looks up all renames for a file (up to endrev) the first
1877 1877 time the file is given. It indexes on the changerev and only
1878 1878 parses the manifest if linkrev != changerev.
1879 1879 Returns rename info for fn at changerev rev.'''
1880 1880 if fn not in rcache:
1881 1881 rcache[fn] = {}
1882 1882 ncache[fn] = {}
1883 1883 fl = repo.file(fn)
1884 1884 for i in fl:
1885 1885 node = fl.node(i)
1886 1886 lr = fl.linkrev(i)
1887 1887 renamed = fl.renamed(node)
1888 1888 rcache[fn][lr] = renamed
1889 1889 if renamed:
1890 1890 ncache[fn][node] = renamed
1891 1891 if lr >= endrev:
1892 1892 break
1893 1893 if rev in rcache[fn]:
1894 1894 return rcache[fn][rev]
1895 1895
1896 1896 # If linkrev != rev (i.e. rev not found in rcache), fall back to
1897 1897 # filectx logic.
1898 1898
1899 1899 try:
1900 1900 return repo[rev][fn].renamed()
1901 1901 except error.LookupError:
1902 1902 pass
1903 1903 return None
1904 1904
1905 1905 df = False
1906 1906 if opts["date"]:
1907 1907 df = util.matchdate(opts["date"])
1908 1908
1909 1909 only_branches = opts.get('only_branch')
1910 1910
1911 1911 displayer = cmdutil.show_changeset(ui, repo, opts, True, matchfn)
1912 1912 for st, rev, fns in changeiter:
1913 1913 if st == 'add':
1914 1914 parents = [p for p in repo.changelog.parentrevs(rev)
1915 1915 if p != nullrev]
1916 1916 if opts.get('no_merges') and len(parents) == 2:
1917 1917 continue
1918 1918 if opts.get('only_merges') and len(parents) != 2:
1919 1919 continue
1920 1920
1921 1921 if only_branches:
1922 1922 revbranch = get(rev)[5]['branch']
1923 1923 if revbranch not in only_branches:
1924 1924 continue
1925 1925
1926 1926 if df:
1927 1927 changes = get(rev)
1928 1928 if not df(changes[2][0]):
1929 1929 continue
1930 1930
1931 1931 if opts.get('keyword'):
1932 1932 changes = get(rev)
1933 1933 miss = 0
1934 1934 for k in [kw.lower() for kw in opts['keyword']]:
1935 1935 if not (k in changes[1].lower() or
1936 1936 k in changes[4].lower() or
1937 1937 k in " ".join(changes[3]).lower()):
1938 1938 miss = 1
1939 1939 break
1940 1940 if miss:
1941 1941 continue
1942 1942
1943 1943 if opts['user']:
1944 1944 changes = get(rev)
1945 1945 miss = 0
1946 1946 for k in opts['user']:
1947 1947 if k != changes[1]:
1948 1948 miss = 1
1949 1949 break
1950 1950 if miss:
1951 1951 continue
1952 1952
1953 1953 copies = []
1954 1954 if opts.get('copies') and rev:
1955 1955 for fn in get(rev)[3]:
1956 1956 rename = getrenamed(fn, rev)
1957 1957 if rename:
1958 1958 copies.append((fn, rename[0]))
1959 1959 displayer.show(context.changectx(repo, rev), copies=copies)
1960 1960 elif st == 'iter':
1961 1961 if count == limit: break
1962 1962 if displayer.flush(rev):
1963 1963 count += 1
1964 1964
1965 1965 def manifest(ui, repo, node=None, rev=None):
1966 1966 """output the current or given revision of the project manifest
1967 1967
1968 1968 Print a list of version controlled files for the given revision.
1969 1969 If no revision is given, the parent of the working directory is used,
1970 1970 or tip if no revision is checked out.
1971 1971
1972 1972 The manifest is the list of files being version controlled.
1974 1974
1975 1975 With the -v flag, print file permissions, symlink and executable bits.
1976 1976 With the --debug flag, print file revision hashes.
1977 1977 """
1978 1978
1979 1979 if rev and node:
1980 1980 raise util.Abort(_("please specify just one revision"))
1981 1981
1982 1982 if not node:
1983 1983 node = rev
1984 1984
1985 1985 decor = {'l':'644 @ ', 'x':'755 * ', '':'644 '}
1986 1986 ctx = repo[node]
1987 1987 for f in ctx:
1988 1988 if ui.debugflag:
1989 1989 ui.write("%40s " % hex(ctx.manifest()[f]))
1990 1990 if ui.verbose:
1991 1991 ui.write(decor[ctx.flags(f)])
1992 1992 ui.write("%s\n" % f)
1993 1993
1994 1994 def merge(ui, repo, node=None, force=None, rev=None):
1995 1995 """merge working directory with another revision
1996 1996
1997 1997 Merge the contents of the current working directory and the
1998 1998 requested revision. Files that changed between either parent are
1999 1999 marked as changed for the next commit and a commit must be
2000 2000 performed before any further updates are allowed.
2001 2001
2002 2002 If no revision is specified, and the working directory's parent is
2003 2003 a head revision, and the current branch contains exactly one other
2004 2004 head, the other head is merged with by default. Otherwise, an
2005 2005 explicit revision to merge with must be provided.
2006 2006 """
2007 2007
2008 2008 if rev and node:
2009 2009 raise util.Abort(_("please specify just one revision"))
2010 2010 if not node:
2011 2011 node = rev
2012 2012
2013 2013 if not node:
2014 2014 branch = repo.changectx(None).branch()
2015 2015 bheads = repo.branchheads(branch)
2016 2016 if len(bheads) > 2:
2017 2017 raise util.Abort(_("branch '%s' has %d heads - "
2018 2018 "please merge with an explicit rev") %
2019 2019 (branch, len(bheads)))
2020 2020
2021 2021 parent = repo.dirstate.parents()[0]
2022 2022 if len(bheads) == 1:
2023 2023 if len(repo.heads()) > 1:
2024 2024 raise util.Abort(_("branch '%s' has one head - "
2025 2025 "please merge with an explicit rev") %
2026 2026 branch)
2027 2027 msg = _('there is nothing to merge')
2028 2028 if parent != repo.lookup(repo[None].branch()):
2029 2029 msg = _('%s - use "hg update" instead') % msg
2030 2030 raise util.Abort(msg)
2031 2031
2032 2032 if parent not in bheads:
2033 2033 raise util.Abort(_('working dir not at a head rev - '
2034 2034 'use "hg update" or merge with an explicit rev'))
2035 2035 node = parent == bheads[0] and bheads[-1] or bheads[0]
2036 2036 return hg.merge(repo, node, force=force)
2037 2037
2038 2038 def outgoing(ui, repo, dest=None, **opts):
2039 2039 """show changesets not found in destination
2040 2040
2041 2041 Show changesets not found in the specified destination repository or
2042 2042 the default push location. These are the changesets that would be pushed
2043 2043 if a push was requested.
2044 2044
2045 2045 See pull for valid destination format details.
2046 2046 """
2047 2047 limit = cmdutil.loglimit(opts)
2048 2048 dest, revs, checkout = hg.parseurl(
2049 2049 ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
2050 2050 cmdutil.setremoteconfig(ui, opts)
2051 2051 if revs:
2052 2052 revs = [repo.lookup(rev) for rev in revs]
2053 2053
2054 2054 other = hg.repository(ui, dest)
2055 2055 ui.status(_('comparing with %s\n') % url.hidepassword(dest))
2056 2056 o = repo.findoutgoing(other, force=opts.get('force'))
2057 2057 if not o:
2058 2058 ui.status(_("no changes found\n"))
2059 2059 return 1
2060 2060 o = repo.changelog.nodesbetween(o, revs)[0]
2061 2061 if opts.get('newest_first'):
2062 2062 o.reverse()
2063 2063 displayer = cmdutil.show_changeset(ui, repo, opts)
2064 2064 count = 0
2065 2065 for n in o:
2066 2066 if count >= limit:
2067 2067 break
2068 2068 parents = [p for p in repo.changelog.parents(n) if p != nullid]
2069 2069 if opts.get('no_merges') and len(parents) == 2:
2070 2070 continue
2071 2071 count += 1
2072 2072 displayer.show(repo[n])
2073 2073
2074 2074 def parents(ui, repo, file_=None, **opts):
2075 2075 """show the parents of the working dir or revision
2076 2076
2077 2077 Print the working directory's parent revisions. If a
2078 2078 revision is given via --rev, the parent of that revision
2079 2079 will be printed. If a file argument is given, the revision in
2080 2080 which the file was last changed (before the working directory
2081 2081 revision, or the argument to --rev if given) is printed.
2082 2082 """
2083 2083 rev = opts.get('rev')
2084 2084 if rev:
2085 2085 ctx = repo[rev]
2086 2086 else:
2087 2087 ctx = repo[None]
2088 2088
2089 2089 if file_:
2090 2090 m = cmdutil.match(repo, (file_,), opts)
2091 2091 if m.anypats() or len(m.files()) != 1:
2092 2092 raise util.Abort(_('can only specify an explicit file name'))
2093 2093 file_ = m.files()[0]
2094 2094 filenodes = []
2095 2095 for cp in ctx.parents():
2096 2096 if not cp:
2097 2097 continue
2098 2098 try:
2099 2099 filenodes.append(cp.filenode(file_))
2100 2100 except error.LookupError:
2101 2101 pass
2102 2102 if not filenodes:
2103 2103 raise util.Abort(_("'%s' not found in manifest!") % file_)
2104 2104 fl = repo.file(file_)
2105 2105 p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
2106 2106 else:
2107 2107 p = [cp.node() for cp in ctx.parents()]
2108 2108
2109 2109 displayer = cmdutil.show_changeset(ui, repo, opts)
2110 2110 for n in p:
2111 2111 if n != nullid:
2112 2112 displayer.show(repo[n])
2113 2113
2114 2114 def paths(ui, repo, search=None):
2115 2115 """show aliases for remote repositories
2116 2116
2117 2117 Show definition of symbolic path name NAME. If no name is given, show
2118 2118 definition of available names.
2119 2119
2120 2120 Path names are defined in the [paths] section of /etc/mercurial/hgrc
2121 2121 and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
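
    A hypothetical [paths] section might look like:

      [paths]
      default = http://example.com/main-repo
      backup = ssh://hg@example.com/backup-repo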
2122 2122
2123 2123 See 'hg help urls' for more information.
2124 2124 """
2125 2125 if search:
2126 2126 for name, path in ui.configitems("paths"):
2127 2127 if name == search:
2128 2128 ui.write("%s\n" % url.hidepassword(path))
2129 2129 return
2130 2130 ui.warn(_("not found!\n"))
2131 2131 return 1
2132 2132 else:
2133 2133 for name, path in ui.configitems("paths"):
2134 2134 ui.write("%s = %s\n" % (name, url.hidepassword(path)))
2135 2135
2136 2136 def postincoming(ui, repo, modheads, optupdate, checkout):
2137 2137 if modheads == 0:
2138 2138 return
2139 2139 if optupdate:
2140 2140 if (modheads <= 1 or len(repo.branchheads()) == 1) or checkout:
2141 2141 return hg.update(repo, checkout)
2142 2142 else:
2143 2143 ui.status(_("not updating, since new heads added\n"))
2144 2144 if modheads > 1:
2145 2145 ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
2146 2146 else:
2147 2147 ui.status(_("(run 'hg update' to get a working copy)\n"))
2148 2148
2149 2149 def pull(ui, repo, source="default", **opts):
2150 2150 """pull changes from the specified source
2151 2151
2152 2152 Pull changes from a remote repository to a local one.
2153 2153
2154 2154 This finds all changes from the repository at the specified path
2155 2155 or URL and adds them to the local repository. By default, this
2156 2156 does not update the copy of the project in the working directory.
2157 2157
2158 2158 If SOURCE is omitted, the 'default' path will be used.
2159 2159 See 'hg help urls' for more information.
2160 2160 """
2161 2161 source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
2162 2162 cmdutil.setremoteconfig(ui, opts)
2163 2163
2164 2164 other = hg.repository(ui, source)
2165 2165 ui.status(_('pulling from %s\n') % url.hidepassword(source))
2166 2166 if revs:
2167 2167 try:
2168 2168 revs = [other.lookup(rev) for rev in revs]
2169 2169 except error.CapabilityError:
2170 2170 err = _("Other repository doesn't support revision lookup, "
2171 2171 "so a rev cannot be specified.")
2172 2172 raise util.Abort(err)
2173 2173
2174 2174 modheads = repo.pull(other, heads=revs, force=opts.get('force'))
2175 2175 return postincoming(ui, repo, modheads, opts.get('update'), checkout)
2176 2176
2177 2177 def push(ui, repo, dest=None, **opts):
2178 2178 """push changes to the specified destination
2179 2179
2180 2180 Push changes from the local repository to the given destination.
2181 2181
2182 2182 This is the symmetrical operation for pull. It moves changes
2183 2183 from the current repository to a different one. If the
2184 2184 destination is local, this is identical to a pull in that
2185 2185 directory from the current one.
2186 2186
2187 2187 By default, push will refuse to run if it detects the result would
2188 2188 increase the number of remote heads. This generally indicates that
2189 2189 the client has forgotten to pull and merge before pushing.
2190 2190
2191 2191 If -r is used, the named changeset and all its ancestors will be pushed
2192 2192 to the remote repository.
2193 2193
2194 2194 Look at the help text for urls for important details about ssh:// URLs.
2195 2195 If DESTINATION is omitted, a default path will be used.
2196 2196 See 'hg help urls' for more information.
2197 2197 """
2198 2198 dest, revs, checkout = hg.parseurl(
2199 2199 ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
2200 2200 cmdutil.setremoteconfig(ui, opts)
2201 2201
2202 2202 other = hg.repository(ui, dest)
2203 2203 ui.status(_('pushing to %s\n') % url.hidepassword(dest))
2204 2204 if revs:
2205 2205 revs = [repo.lookup(rev) for rev in revs]
2206 2206 r = repo.push(other, opts.get('force'), revs=revs)
2207 2207 return r == 0
2208 2208
2209 2209 def rawcommit(ui, repo, *pats, **opts):
2210 2210 """raw commit interface (DEPRECATED)
2211 2211
2212 2212 (DEPRECATED)
2213 2213 Lowlevel commit, for use in helper scripts.
2214 2214
2215 2215 This command is not intended to be used by normal users, as it is
2216 2216 primarily useful for importing from other SCMs.
2217 2217
2218 2218 This command is now deprecated and will be removed in a future
2219 2219 release; please use debugsetparents and commit instead.
2220 2220 """
2221 2221
2222 2222 ui.warn(_("(the rawcommit command is deprecated)\n"))
2223 2223
2224 2224 message = cmdutil.logmessage(opts)
2225 2225
2226 2226 files = cmdutil.match(repo, pats, opts).files()
2227 2227 if opts.get('files'):
2228 2228 files += open(opts['files']).read().splitlines()
2229 2229
2230 2230 parents = [repo.lookup(p) for p in opts['parent']]
2231 2231
2232 2232 try:
2233 2233 repo.rawcommit(files, message, opts['user'], opts['date'], *parents)
2234 2234 except ValueError, inst:
2235 2235 raise util.Abort(str(inst))
2236 2236
2237 2237 def recover(ui, repo):
2238 2238 """roll back an interrupted transaction
2239 2239
2240 2240 Recover from an interrupted commit or pull.
2241 2241
2242 2242 This command tries to fix the repository status after an interrupted
2243 2243 operation. It should only be necessary when Mercurial suggests it.
2244 2244 """
2245 2245 if repo.recover():
2246 2246 return hg.verify(repo)
2247 2247 return 1
2248 2248
2249 2249 def remove(ui, repo, *pats, **opts):
2250 2250 """remove the specified files on the next commit
2251 2251
2252 2252 Schedule the indicated files for removal from the repository.
2253 2253
2254 2254 This only removes files from the current branch, not from the entire
2255 2255 project history. -A can be used to remove only files that have already
2256 2256 been deleted, -f can be used to force deletion, and -Af can be used
2257 2257 to remove files from the next revision without deleting them.
2258 2258
2259 2259 The following table details the behavior of remove for different file
2260 2260 states (columns) and option combinations (rows). The file states are
2261 2261 Added, Clean, Modified and Missing (as reported by hg status). The
2262 2262 actions are Warn, Remove (from branch) and Delete (from disk).
2263 2263
2264 2264        A  C  M  !
2265 2265 none   W  RD W  R
2266 2266 -f     R  RD RD R
2267 2267 -A     W  W  W  R
2268 2268 -Af    R  R  R  R
2269 2269
2270 2270 This command schedules the files to be removed at the next commit.
2271 2271 To undo a remove before that, see hg revert.
2272 2272 """
2273 2273
2274 2274 after, force = opts.get('after'), opts.get('force')
2275 2275 if not pats and not after:
2276 2276 raise util.Abort(_('no files specified'))
2277 2277
2278 2278 m = cmdutil.match(repo, pats, opts)
2279 2279 s = repo.status(match=m, clean=True)
2280 2280 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2281 2281
2282 2282 def warn(files, reason):
2283 2283 for f in files:
2284 2284 ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
2285 2285 % (m.rel(f), reason))
2286 2286
2287 2287 if force:
2288 2288 remove, forget = modified + deleted + clean, added
2289 2289 elif after:
2290 2290 remove, forget = deleted, []
2291 2291 warn(modified + added + clean, _('still exists'))
2292 2292 else:
2293 2293 remove, forget = deleted + clean, []
2294 2294 warn(modified, _('is modified'))
2295 2295 warn(added, _('has been marked for add'))
2296 2296
2297 2297 for f in util.sort(remove + forget):
2298 2298 if ui.verbose or not m.exact(f):
2299 2299 ui.status(_('removing %s\n') % m.rel(f))
2300 2300
2301 2301 repo.forget(forget)
2302 2302 repo.remove(remove, unlink=not after)
2303 2303
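# The remove behavior table from the docstring above, transcribed as a
# lookup keyed by the (force, after) option flags; action codes are
# W(arn), R(emove from branch) and D(elete from disk), file states
# A/C/M/! as reported by hg status.
REMOVE_BEHAVIOR = {
    # (force, after): {state: actions}
    (False, False): {'A': 'W', 'C': 'RD', 'M': 'W',  '!': 'R'},   # none
    (True,  False): {'A': 'R', 'C': 'RD', 'M': 'RD', '!': 'R'},   # -f
    (False, True):  {'A': 'W', 'C': 'W',  'M': 'W',  '!': 'R'},   # -A
    (True,  True):  {'A': 'R', 'C': 'R',  'M': 'R',  '!': 'R'},   # -Af
}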
2304 2304 def rename(ui, repo, *pats, **opts):
2305 2305 """rename files; equivalent of copy + remove
2306 2306
2307 2307 Mark dest as copies of sources; mark sources for deletion. If
2308 2308 dest is a directory, copies are put in that directory. If dest is
2309 2309 a file, there can only be one source.
2310 2310
2311 2311 By default, this command copies the contents of files as they
2312 2312 exist in the working directory. If invoked with --after, the
2313 2313 operation is recorded, but no copying is performed.
2314 2314
2315 2315 This command takes effect at the next commit. To undo a rename
2316 2316 before that, see hg revert.
2317 2317 """
2318 2318 wlock = repo.wlock(False)
2319 2319 try:
2320 2320 return cmdutil.copy(ui, repo, pats, opts, rename=True)
2321 2321 finally:
2322 2322 del wlock
2323 2323
2324 2324 def resolve(ui, repo, *pats, **opts):
2325 2325 """retry file merges from a merge or update
2326 2326
2327 2327 This command will cleanly retry unresolved file merges using file
2328 2328 revisions preserved from the last update or merge. To attempt to
2329 2329 resolve all unresolved files, use the -a switch.
2330 2330
2331 2331 This command will also allow listing resolved files and manually
2332 2332 marking and unmarking files as resolved.
2333 2333
2334 2334 The codes used to show the status of files are:
2335 2335 U = unresolved
2336 2336 R = resolved
2337 2337 """
2338 2338
2339 2339 all, mark, unmark, show = [opts.get(o) for o in 'all mark unmark list'.split()]
2340 2340
2341 2341 if (show and (mark or unmark)) or (mark and unmark):
2342 2342 raise util.Abort(_("too many options specified"))
2343 2343 if pats and all:
2344 2344 raise util.Abort(_("can't specify --all and patterns"))
2345 2345 if not (all or pats or show or mark or unmark):
2346 2346 raise util.Abort(_('no files or directories specified; '
2347 2347 'use --all to remerge all files'))
2348 2348
2349 2349 ms = merge_.mergestate(repo)
2350 2350 m = cmdutil.match(repo, pats, opts)
2351 2351
2352 2352 for f in ms:
2353 2353 if m(f):
2354 2354 if show:
2355 2355 ui.write("%s %s\n" % (ms[f].upper(), f))
2356 2356 elif mark:
2357 2357 ms.mark(f, "r")
2358 2358 elif unmark:
2359 2359 ms.mark(f, "u")
2360 2360 else:
2361 2361 wctx = repo[None]
2362 2362 mctx = wctx.parents()[-1]
2363 2363
2364 2364 # backup pre-resolve (merge uses .orig for its own purposes)
2365 2365 a = repo.wjoin(f)
2366 2366 util.copyfile(a, a + ".resolve")
2367 2367
2368 2368 # resolve file
2369 2369 ms.resolve(f, wctx, mctx)
2370 2370
2371 2371 # replace filemerge's .orig file with our resolve file
2372 2372 util.rename(a + ".resolve", a + ".orig")
2373 2373
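# A minimal sketch of driving the merge state directly, mirroring
# resolve() above; assumes merge_.mergestate of this era, where iterating
# yields the files in the merge, ms[f] is 'u' (unresolved) or 'r'
# (resolved), and resolve() re-runs the file merge against the working
# context and the other merge parent.
def remerge_unresolved(repo):
    ms = merge_.mergestate(repo)
    wctx = repo[None]
    mctx = wctx.parents()[-1]
    for f in ms:
        if ms[f] == 'u':
            ms.resolve(f, wctx, mctx)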
2374 2374 def revert(ui, repo, *pats, **opts):
2375 2375 """restore individual files or dirs to an earlier state
2376 2376
2377 2377 (use update -r to check out earlier revisions; revert does not
2378 2378 change the working dir parents)
2379 2379
2380 2380 With no revision specified, revert the named files or directories
2381 2381 to the contents they had in the parent of the working directory.
2382 2382 This restores the contents of the affected files to an unmodified
2383 2383 state and unschedules adds, removes, copies, and renames. If the
2384 2384 working directory has two parents, you must explicitly specify the
2385 2385 revision to revert to.
2386 2386
2387 2387 Using the -r option, revert the given files or directories to their
2388 2388 contents as of a specific revision. This can be helpful to "roll
2389 2389 back" some or all of an earlier change.
2390 2390 See 'hg help dates' for a list of formats valid for -d/--date.
2391 2391
2392 2392 Revert modifies the working directory. It does not commit any
2393 2393 changes, or change the parent of the working directory. If you
2394 2394 revert to a revision other than the parent of the working
2395 2395 directory, the reverted files will thus appear modified
2396 2396 afterwards.
2397 2397
2398 2398 If a file has been deleted, it is restored. If the executable
2399 2399 mode of a file was changed, it is reset.
2400 2400
2401 2401 If names are given, all files matching the names are reverted.
2402 2402 If no arguments are given, no files are reverted.
2403 2403
2404 2404 Modified files are saved with a .orig suffix before reverting.
2405 2405 To disable these backups, use --no-backup.
2406 2406 """
2407 2407
2408 2408 if opts["date"]:
2409 2409 if opts["rev"]:
2410 2410 raise util.Abort(_("you can't specify a revision and a date"))
2411 2411 opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
2412 2412
2413 2413 if not pats and not opts.get('all'):
2414 2414 raise util.Abort(_('no files or directories specified; '
2415 2415 'use --all to revert the whole repo'))
2416 2416
2417 2417 parent, p2 = repo.dirstate.parents()
2418 2418 if not opts.get('rev') and p2 != nullid:
2419 2419 raise util.Abort(_('uncommitted merge - please provide a '
2420 2420 'specific revision'))
2421 2421 ctx = repo[opts.get('rev')]
2422 2422 node = ctx.node()
2423 2423 mf = ctx.manifest()
2424 2424 if node == parent:
2425 2425 pmf = mf
2426 2426 else:
2427 2427 pmf = None
2428 2428
2429 2429 # need all matching names in dirstate and manifest of target rev,
2430 2430 # so have to walk both. do not print errors if files exist in one
2431 2431 # but not other.
2432 2432
2433 2433 names = {}
2434 2434
2435 2435 wlock = repo.wlock()
2436 2436 try:
2437 2437 # walk dirstate.
2438 2438 files = []
2439 2439
2440 2440 m = cmdutil.match(repo, pats, opts)
2441 2441 m.bad = lambda x,y: False
2442 2442 for abs in repo.walk(m):
2443 2443 names[abs] = m.rel(abs), m.exact(abs)
2444 2444
2445 2445 # walk target manifest.
2446 2446
2447 2447 def badfn(path, msg):
2448 2448 if path in names:
2449 2449 return False
2450 2450 path_ = path + '/'
2451 2451 for f in names:
2452 2452 if f.startswith(path_):
2453 2453 return False
2454 2454 repo.ui.warn("%s: %s\n" % (m.rel(path), msg))
2455 2455 return False
2456 2456
2457 2457 m = cmdutil.match(repo, pats, opts)
2458 2458 m.bad = badfn
2459 2459 for abs in repo[node].walk(m):
2460 2460 if abs not in names:
2461 2461 names[abs] = m.rel(abs), m.exact(abs)
2462 2462
2463 2463 m = cmdutil.matchfiles(repo, names)
2464 2464 changes = repo.status(match=m)[:4]
2465 2465 modified, added, removed, deleted = map(dict.fromkeys, changes)
2466 2466
2467 2467 # if f is a rename, also revert the source
2468 2468 cwd = repo.getcwd()
2469 2469 for f in added:
2470 2470 src = repo.dirstate.copied(f)
2471 2471 if src and src not in names and repo.dirstate[src] == 'r':
2472 2472 removed[src] = None
2473 2473 names[src] = (repo.pathto(src, cwd), True)
2474 2474
2475 2475 def removeforget(abs):
2476 2476 if repo.dirstate[abs] == 'a':
2477 2477 return _('forgetting %s\n')
2478 2478 return _('removing %s\n')
2479 2479
2480 2480 revert = ([], _('reverting %s\n'))
2481 2481 add = ([], _('adding %s\n'))
2482 2482 remove = ([], removeforget)
2483 2483 undelete = ([], _('undeleting %s\n'))
2484 2484
2485 2485 disptable = (
2486 2486 # dispatch table:
2487 2487 # file state
2488 2488 # action if in target manifest
2489 2489 # action if not in target manifest
2490 2490 # make backup if in target manifest
2491 2491 # make backup if not in target manifest
2492 2492 (modified, revert, remove, True, True),
2493 2493 (added, revert, remove, True, False),
2494 2494 (removed, undelete, None, False, False),
2495 2495 (deleted, revert, remove, False, False),
2496 2496 )
2497 2497
2498 2498 for abs, (rel, exact) in util.sort(names.items()):
2499 2499 mfentry = mf.get(abs)
2500 2500 target = repo.wjoin(abs)
2501 2501 def handle(xlist, dobackup):
2502 2502 xlist[0].append(abs)
2503 2503 if dobackup and not opts.get('no_backup') and util.lexists(target):
2504 2504 bakname = "%s.orig" % rel
2505 2505 ui.note(_('saving current version of %s as %s\n') %
2506 2506 (rel, bakname))
2507 2507 if not opts.get('dry_run'):
2508 2508 util.copyfile(target, bakname)
2509 2509 if ui.verbose or not exact:
2510 2510 msg = xlist[1]
2511 2511 if not isinstance(msg, basestring):
2512 2512 msg = msg(abs)
2513 2513 ui.status(msg % rel)
2514 2514 for table, hitlist, misslist, backuphit, backupmiss in disptable:
2515 2515 if abs not in table: continue
2516 2516 # file has changed in dirstate
2517 2517 if mfentry:
2518 2518 handle(hitlist, backuphit)
2519 2519 elif misslist is not None:
2520 2520 handle(misslist, backupmiss)
2521 2521 break
2522 2522 else:
2523 2523 if abs not in repo.dirstate:
2524 2524 if mfentry:
2525 2525 handle(add, True)
2526 2526 elif exact:
2527 2527 ui.warn(_('file not managed: %s\n') % rel)
2528 2528 continue
2529 2529 # file has not changed in dirstate
2530 2530 if node == parent:
2531 2531 if exact: ui.warn(_('no changes needed to %s\n') % rel)
2532 2532 continue
2533 2533 if pmf is None:
2534 2534 # only need parent manifest in this unlikely case,
2535 2535 # so do not read by default
2536 2536 pmf = repo[parent].manifest()
2537 2537 if abs in pmf:
2538 2538 if mfentry:
2539 2539 # if version of file is same in parent and target
2540 2540 # manifests, do nothing
2541 2541 if (pmf[abs] != mfentry or
2542 2542 pmf.flags(abs) != mf.flags(abs)):
2543 2543 handle(revert, False)
2544 2544 else:
2545 2545 handle(remove, False)
2546 2546
2547 2547 if not opts.get('dry_run'):
2548 2548 def checkout(f):
2549 2549 fc = ctx[f]
2550 2550 repo.wwrite(f, fc.data(), fc.flags())
2551 2551
2552 2552 audit_path = util.path_auditor(repo.root)
2553 2553 for f in remove[0]:
2554 2554 if repo.dirstate[f] == 'a':
2555 2555 repo.dirstate.forget(f)
2556 2556 continue
2557 2557 audit_path(f)
2558 2558 try:
2559 2559 util.unlink(repo.wjoin(f))
2560 2560 except OSError:
2561 2561 pass
2562 2562 repo.dirstate.remove(f)
2563 2563
2564 2564 normal = None
2565 2565 if node == parent:
2566 2566 # We're reverting to our parent. If possible, we'd like status
2567 2567 # to report the file as clean. We have to use normallookup for
2568 2568 # merges to avoid losing information about merged/dirty files.
2569 2569 if p2 != nullid:
2570 2570 normal = repo.dirstate.normallookup
2571 2571 else:
2572 2572 normal = repo.dirstate.normal
2573 2573 for f in revert[0]:
2574 2574 checkout(f)
2575 2575 if normal:
2576 2576 normal(f)
2577 2577
2578 2578 for f in add[0]:
2579 2579 checkout(f)
2580 2580 repo.dirstate.add(f)
2581 2581
2582 2582 normal = repo.dirstate.normallookup
2583 2583 if node == parent and p2 == nullid:
2584 2584 normal = repo.dirstate.normal
2585 2585 for f in undelete[0]:
2586 2586 checkout(f)
2587 2587 normal(f)
2588 2588
2589 2589 finally:
2590 2590 del wlock
2591 2591
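# A condensed sketch of revert's dispatch table above: each file-state
# set maps to (action when the path exists in the target manifest, action
# when it does not, backup on hit, backup on miss); names here are
# illustrative only.
def plan_revert(state, in_target_manifest):
    table = {
        'modified': ('revert',   'remove', True,  True),
        'added':    ('revert',   'remove', True,  False),
        'removed':  ('undelete',  None,    False, False),
        'deleted':  ('revert',   'remove', False, False),
    }
    hit, miss, backuphit, backupmiss = table[state]
    if in_target_manifest:
        return hit, backuphit
    return miss, backupmiss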
2592 2592 def rollback(ui, repo):
2593 2593 """roll back the last transaction
2594 2594
2595 2595 This command should be used with care. There is only one level of
2596 2596 rollback, and there is no way to undo a rollback. It will also
2597 2597 restore the dirstate at the time of the last transaction, losing
2598 2598 any dirstate changes since that time.
2599 2599
2600 2600 Transactions are used to encapsulate the effects of all commands
2601 2601 that create new changesets or propagate existing changesets into a
2602 2602 repository. For example, the following commands are transactional,
2603 2603 and their effects can be rolled back:
2604 2604
2605 2605 commit
2606 2606 import
2607 2607 pull
2608 2608 push (with this repository as destination)
2609 2609 unbundle
2610 2610
2611 2611 This command is not intended for use on public repositories. Once
2612 2612 changes are visible for pull by other users, rolling a transaction
2613 2613 back locally is ineffective (someone else may already have pulled
2614 2614 the changes). Furthermore, a race is possible with readers of the
2615 2615 repository; for example an in-progress pull from the repository
2616 2616 may fail if a rollback is performed.
2617 2617 """
2618 2618 repo.rollback()
2619 2619
2620 2620 def root(ui, repo):
2621 2621 """print the root (top) of the current working dir
2622 2622
2623 2623 Print the root directory of the current repository.
2624 2624 """
2625 2625 ui.write(repo.root + "\n")
2626 2626
2627 2627 def serve(ui, repo, **opts):
2628 2628 """export the repository via HTTP
2629 2629
2630 2630 Start a local HTTP repository browser and pull server.
2631 2631
2632 2632 By default, the server logs accesses to stdout and errors to
2633 2633 stderr. Use the "-A" and "-E" options to log to files.
2634 2634 """
2635 2635
2636 2636 if opts["stdio"]:
2637 2637 if repo is None:
2638 2638 raise error.RepoError(_("There is no Mercurial repository here"
2639 2639 " (.hg not found)"))
2640 2640 s = sshserver.sshserver(ui, repo)
2641 2641 s.serve_forever()
2642 2642
2643 2643 parentui = ui.parentui or ui
2644 2644 optlist = ("name templates style address port prefix ipv6"
2645 2645 " accesslog errorlog webdir_conf certificate")
2646 2646 for o in optlist.split():
2647 2647 if opts[o]:
2648 2648 parentui.setconfig("web", o, str(opts[o]))
2649 2649 if (repo is not None) and (repo.ui != parentui):
2650 2650 repo.ui.setconfig("web", o, str(opts[o]))
2651 2651
2652 2652 if repo is None and not ui.config("web", "webdir_conf"):
2653 2653 raise error.RepoError(_("There is no Mercurial repository here"
2654 2654 " (.hg not found)"))
2655 2655
2656 2656 class service:
2657 2657 def init(self):
2658 2658 util.set_signal_handler()
2659 2659 self.httpd = hgweb.server.create_server(parentui, repo)
2660 2660
2661 2661 if not ui.verbose: return
2662 2662
2663 2663 if self.httpd.prefix:
2664 2664 prefix = self.httpd.prefix.strip('/') + '/'
2665 2665 else:
2666 2666 prefix = ''
2667 2667
2668 2668 port = ':%d' % self.httpd.port
2669 2669 if port == ':80':
2670 2670 port = ''
2671 2671
2672 2672 bindaddr = self.httpd.addr
2673 2673 if bindaddr == '0.0.0.0':
2674 2674 bindaddr = '*'
2675 2675 elif ':' in bindaddr: # IPv6
2676 2676 bindaddr = '[%s]' % bindaddr
2677 2677
2678 2678 fqaddr = self.httpd.fqaddr
2679 2679 if ':' in fqaddr:
2680 2680 fqaddr = '[%s]' % fqaddr
2681 2681 ui.status(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
2682 2682 (fqaddr, port, prefix, bindaddr, self.httpd.port))
2683 2683
2684 2684 def run(self):
2685 2685 self.httpd.serve_forever()
2686 2686
2687 2687 service = service()
2688 2688
2689 2689 cmdutil.service(opts, initfn=service.init, runfn=service.run)
2690 2690
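# A sketch of the two-method service protocol handed to cmdutil.service()
# above (an assumption about this era's helper): init() runs once to bind
# resources before any daemon detach, run() then blocks serving; the
# helper wires -d/--daemon, --daemon-pipefds and --pid-file around the
# two callbacks.
class printservice(object):
    def init(self):
        self.banner = 'serving'     # acquire sockets/log files here
    def run(self):
        while True:                 # blocking loop, like serve_forever()
            pass

# svc = printservice()
# cmdutil.service(opts, initfn=svc.init, runfn=svc.run)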
2691 2691 def status(ui, repo, *pats, **opts):
2692 2692 """show changed files in the working directory
2693 2693
2694 2694 Show status of files in the repository. If names are given, only
2695 2695 files that match are shown. Files that are clean or ignored, or the
2696 2696 source of a copy/move operation, are not listed unless -c (clean),
2697 2697 -i (ignored), -C (copies) or -A (all) is given. Unless options
2698 2698 described with "show only ..." are given, the options -mardu are used.
2699 2699
2700 2700 Option -q/--quiet hides untracked (unknown and ignored) files
2701 2701 unless explicitly requested with -u/--unknown or -i/--ignored.
2702 2702
2703 2703 NOTE: status may appear to disagree with diff if permissions have
2704 2704 changed or a merge has occurred. The standard diff format does not
2705 2705 report permission changes and diff only reports changes relative
2706 2706 to one merge parent.
2707 2707
2708 2708 If one revision is given, it is used as the base revision.
2709 2709 If two revisions are given, the difference between them is shown.
2710 2710
2711 2711 The codes used to show the status of files are:
2712 2712 M = modified
2713 2713 A = added
2714 2714 R = removed
2715 2715 C = clean
2716 2716 ! = deleted, but still tracked
2717 2717 ? = not tracked
2718 2718 I = ignored
2719 2719 = the previous added file was copied from here
2720 2720 """
2721 2721
2722 2722 node1, node2 = cmdutil.revpair(repo, opts.get('rev'))
2723 2723 cwd = (pats and repo.getcwd()) or ''
2724 2724 end = opts.get('print0') and '\0' or '\n'
2725 2725 copy = {}
2726 2726 states = 'modified added removed deleted unknown ignored clean'.split()
2727 2727 show = [k for k in states if opts.get(k)]
2728 2728 if opts.get('all'):
2729 2729 show += ui.quiet and (states[:4] + ['clean']) or states
2730 2730 if not show:
2731 2731 show = ui.quiet and states[:4] or states[:5]
2732 2732
2733 2733 stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts),
2734 2734 'ignored' in show, 'clean' in show, 'unknown' in show)
2735 2735 changestates = zip(states, 'MAR!?IC', stat)
2736 2736
2737 2737 if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
2738 2738 ctxn = repo[nullid]
2739 2739 ctx1 = repo[node1]
2740 2740 ctx2 = repo[node2]
2741 2741 added = stat[1]
2742 2742 if node2 is None:
2743 2743 added = stat[0] + stat[1] # merged?
2744 2744
2745 2745 for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
2746 2746 if k in added:
2747 2747 copy[k] = v
2748 2748 elif v in added:
2749 2749 copy[v] = k
2750 2750
2751 2751 for state, char, files in changestates:
2752 2752 if state in show:
2753 2753 format = "%s %%s%s" % (char, end)
2754 2754 if opts.get('no_status'):
2755 2755 format = "%%s%s" % end
2756 2756
2757 2757 for f in files:
2758 2758 ui.write(format % repo.pathto(f, cwd))
2759 2759 if f in copy:
2760 2760 ui.write(' %s%s' % (repo.pathto(copy[f], cwd), end))
2761 2761
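# A small sketch of the states/codes pairing used in status() above:
# repo.status() returns seven file lists in the fixed order below, and
# zip() aligns each list with its one-letter status code.
states = 'modified added removed deleted unknown ignored clean'.split()
codes = 'MAR!?IC'
stat = ([], ['new.txt'], [], [], [], [], [])    # illustrative values only
for state, char, files in zip(states, codes, stat):
    for f in files:
        print "%s %s" % (char, f)               # -> "A new.txt"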
2762 2762 def tag(ui, repo, name1, *names, **opts):
2763 2763 """add one or more tags for the current or given revision
2764 2764
2765 2765 Name a particular revision using <name>.
2766 2766
2767 2767 Tags are used to name particular revisions of the repository and are
2768 2768 very useful for comparing different revisions, going back to
2769 2769 significant earlier versions, or marking branch points as releases.
2770 2770
2771 2771 If no revision is given, the parent of the working directory is used,
2772 2772 or tip if no revision is checked out.
2773 2773
2774 2774 To facilitate version control, distribution, and merging of tags,
2775 2775 they are stored as a file named ".hgtags" which is managed
2776 2776 similarly to other project files and can be hand-edited if
2777 2777 necessary. The file '.hg/localtags' is used for local tags (not
2778 2778 shared among repositories).
2779 2779
2780 2780 See 'hg help dates' for a list of formats valid for -d/--date.
2781 2781 """
2782 2782
2783 2783 rev_ = "."
2784 2784 names = (name1,) + names
2785 2785 if len(names) != len(dict.fromkeys(names)):
2786 2786 raise util.Abort(_('tag names must be unique'))
2787 2787 for n in names:
2788 2788 if n in ['tip', '.', 'null']:
2789 2789 raise util.Abort(_('the name \'%s\' is reserved') % n)
2790 2790 if opts.get('rev') and opts.get('remove'):
2791 2791 raise util.Abort(_("--rev and --remove are incompatible"))
2792 2792 if opts.get('rev'):
2793 2793 rev_ = opts['rev']
2794 2794 message = opts.get('message')
2795 2795 if opts.get('remove'):
2796 2796 expectedtype = opts.get('local') and 'local' or 'global'
2797 2797 for n in names:
2798 2798 if not repo.tagtype(n):
2799 2799 raise util.Abort(_('tag \'%s\' does not exist') % n)
2800 2800 if repo.tagtype(n) != expectedtype:
2801 2801 raise util.Abort(_('tag \'%s\' is not a %s tag') %
2802 2802 (n, expectedtype))
2803 2803 rev_ = nullid
2804 2804 if not message:
2805 2805 message = _('Removed tag %s') % ', '.join(names)
2806 2806 elif not opts.get('force'):
2807 2807 for n in names:
2808 2808 if n in repo.tags():
2809 2809 raise util.Abort(_('tag \'%s\' already exists '
2810 2810 '(use -f to force)') % n)
2811 2811 if not rev_ and repo.dirstate.parents()[1] != nullid:
2812 2812 raise util.Abort(_('uncommitted merge - please provide a '
2813 2813 'specific revision'))
2814 2814 r = repo[rev_].node()
2815 2815
2816 2816 if not message:
2817 2817 message = (_('Added tag %s for changeset %s') %
2818 2818 (', '.join(names), short(r)))
2819 2819
2820 2820 date = opts.get('date')
2821 2821 if date:
2822 2822 date = util.parsedate(date)
2823 2823
2824 2824 repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
2825 2825
2826 2826 def tags(ui, repo):
2827 2827 """list repository tags
2828 2828
2829 2829 This lists both regular and local tags. When the -v/--verbose switch
2830 2830 is used, a third column "local" is printed for local tags.
2831 2831 """
2832 2832
2833 2833 l = repo.tagslist()
2834 2834 l.reverse()
2835 2835 hexfunc = ui.debugflag and hex or short
2836 2836 tagtype = ""
2837 2837
2838 2838 for t, n in l:
2839 2839 if ui.quiet:
2840 2840 ui.write("%s\n" % t)
2841 2841 continue
2842 2842
2843 2843 try:
2844 2844 hn = hexfunc(n)
2845 2845 r = "%5d:%s" % (repo.changelog.rev(n), hn)
2846 2846 except error.LookupError:
2847 2847 r = " ?:%s" % hn
2848 2848 else:
2849 2849 spaces = " " * (30 - util.colwidth(t))
2850 2850 if ui.verbose:
2851 2851 if repo.tagtype(t) == 'local':
2852 2852 tagtype = " local"
2853 2853 else:
2854 2854 tagtype = ""
2855 2855 ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype))
2856 2856
2857 2857 def tip(ui, repo, **opts):
2858 2858 """show the tip revision
2859 2859
2860 2860 The tip revision (usually just called the tip) is the most
2861 2861 recently added changeset in the repository, the most recently
2862 2862 changed head.
2863 2863
2864 2864 If you have just made a commit, that commit will be the tip. If
2865 2865 you have just pulled changes from another repository, the tip of
2866 2866 that repository becomes the current tip. The "tip" tag is special
2867 2867 and cannot be renamed or assigned to a different changeset.
2868 2868 """
2869 2869 cmdutil.show_changeset(ui, repo, opts).show(repo[len(repo) - 1])
2870 2870
2871 2871 def unbundle(ui, repo, fname1, *fnames, **opts):
2872 2872 """apply one or more changegroup files
2873 2873
2874 2874 Apply one or more compressed changegroup files generated by the
2875 2875 bundle command.
2876 2876 """
2877 2877 fnames = (fname1,) + fnames
2878 2878
2879 2879 lock = None
2880 2880 try:
2881 2881 lock = repo.lock()
2882 2882 for fname in fnames:
2883 2883 f = url.open(ui, fname)
2884 2884 gen = changegroup.readbundle(f, fname)
2885 2885 modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
2886 2886 finally:
2887 2887 del lock
2888 2888
2889 2889 return postincoming(ui, repo, modheads, opts.get('update'), None)
2890 2890
2891 2891 def update(ui, repo, node=None, rev=None, clean=False, date=None):
2892 2892 """update working directory
2893 2893
2894 2894 Update the repository's working directory to the specified revision,
2895 2895 or the tip of the current branch if none is specified. Use null as
2896 2896 the revision to remove the working copy (like 'hg clone -U').
2897 2897
2898 2898 When the working dir contains no uncommitted changes, it will be
2899 2899 replaced by the state of the requested revision from the repo. When
2900 2900 the requested revision is on a different branch, the working dir
2901 2901 will additionally be switched to that branch.
2902 2902
2903 2903 When there are uncommitted changes, use option -C to discard them,
2904 2904 forcibly replacing the state of the working dir with the requested
2905 2905 revision.
2906 2906
2907 2907 When there are uncommitted changes and option -C is not used, and
2908 2908 the parent revision and requested revision are on the same branch,
2909 2909 and one of them is an ancestor of the other, then the new working
2910 2910 directory will contain the requested revision merged with the
2911 2911 uncommitted changes. Otherwise, the update will fail with a
2912 2912 suggestion to use 'merge' or 'update -C' instead.
2913 2913
2914 2914 If you want to update just one file to an older revision, use revert.
2915 2915
2916 2916 See 'hg help dates' for a list of formats valid for --date.
2917 2917 """
2918 2918 if rev and node:
2919 2919 raise util.Abort(_("please specify just one revision"))
2920 2920
2921 2921 if not rev:
2922 2922 rev = node
2923 2923
2924 2924 if date:
2925 2925 if rev:
2926 2926 raise util.Abort(_("you can't specify a revision and a date"))
2927 2927 rev = cmdutil.finddate(ui, repo, date)
2928 2928
2929 2929 if clean:
2930 2930 return hg.clean(repo, rev)
2931 2931 else:
2932 2932 return hg.update(repo, rev)
2933 2933
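# A condensed restatement of the rules in the update docstring above,
# assuming hg.clean/hg.update of this era: -C discards local changes
# outright, while a plain update merges uncommitted changes along a
# linear path and aborts (suggesting 'merge' or 'update -C') otherwise.
# hg.clean(repo, rev)    # forced: working dir becomes rev exactly
# hg.update(repo, rev)   # linear: rev merged with uncommitted changes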
2934 2934 def verify(ui, repo):
2935 2935 """verify the integrity of the repository
2936 2936
2937 2937 Verify the integrity of the current repository.
2938 2938
2939 2939 This will perform an extensive check of the repository's
2940 2940 integrity, validating the hashes and checksums of each entry in
2941 2941 the changelog, manifest, and tracked files, as well as the
2942 2942 integrity of their crosslinks and indices.
2943 2943 """
2944 2944 return hg.verify(repo)
2945 2945
2946 2946 def version_(ui):
2947 2947 """output version and copyright information"""
2948 2948 ui.write(_("Mercurial Distributed SCM (version %s)\n")
2949 2949 % util.version())
2950 2950 ui.status(_(
2951 2951 "\nCopyright (C) 2005-2009 Matt Mackall <mpm@selenic.com> and others\n"
2952 2952 "This is free software; see the source for copying conditions. "
2953 2953 "There is NO\nwarranty; "
2954 2954 "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
2955 2955 ))
2956 2956
2957 2957 # Command options and aliases are listed here, alphabetically
2958 2958
2959 2959 globalopts = [
2960 2960 ('R', 'repository', '',
2961 2961 _('repository root directory or symbolic path name')),
2962 2962 ('', 'cwd', '', _('change working directory')),
2963 2963 ('y', 'noninteractive', None,
2964 2964 _('do not prompt, assume \'yes\' for any required answers')),
2965 2965 ('q', 'quiet', None, _('suppress output')),
2966 2966 ('v', 'verbose', None, _('enable additional output')),
2967 2967 ('', 'config', [], _('set/override config option')),
2968 2968 ('', 'debug', None, _('enable debugging output')),
2969 2969 ('', 'debugger', None, _('start debugger')),
2970 2970 ('', 'encoding', util._encoding, _('set the charset encoding')),
2971 2971 ('', 'encodingmode', util._encodingmode, _('set the charset encoding mode')),
2972 2972 ('', 'lsprof', None, _('print improved command execution profile')),
2973 2973 ('', 'traceback', None, _('print traceback on exception')),
2974 2974 ('', 'time', None, _('time how long the command takes')),
2975 2975 ('', 'profile', None, _('print command execution profile')),
2976 2976 ('', 'version', None, _('output version information and exit')),
2977 2977 ('h', 'help', None, _('display help and exit')),
2978 2978 ]
2979 2979
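# Option tuples above follow the (short flag, long flag, default, help)
# layout consumed by the option parser; by the conventions visible here,
# a None default yields a boolean flag, a list default accumulates
# repeated uses (e.g. --config), and a string or integer default takes a
# value of that type (e.g. --encoding, serve's --port).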
2980 2980 dryrunopts = [('n', 'dry-run', None,
2981 2981 _('do not perform actions, just print output'))]
2982 2982
2983 2983 remoteopts = [
2984 2984 ('e', 'ssh', '', _('specify ssh command to use')),
2985 2985 ('', 'remotecmd', '', _('specify hg command to run on the remote side')),
2986 2986 ]
2987 2987
2988 2988 walkopts = [
2989 2989 ('I', 'include', [], _('include names matching the given patterns')),
2990 2990 ('X', 'exclude', [], _('exclude names matching the given patterns')),
2991 2991 ]
2992 2992
2993 2993 commitopts = [
2994 2994 ('m', 'message', '', _('use <text> as commit message')),
2995 2995 ('l', 'logfile', '', _('read commit message from <file>')),
2996 2996 ]
2997 2997
2998 2998 commitopts2 = [
2999 2999 ('d', 'date', '', _('record datecode as commit date')),
3000 3000 ('u', 'user', '', _('record user as committer')),
3001 3001 ]
3002 3002
3003 3003 templateopts = [
3004 3004 ('', 'style', '', _('display using template map file')),
3005 3005 ('', 'template', '', _('display with template')),
3006 3006 ]
3007 3007
3008 3008 logopts = [
3009 3009 ('p', 'patch', None, _('show patch')),
3010 3010 ('g', 'git', None, _('use git extended diff format')),
3011 3011 ('l', 'limit', '', _('limit number of changes displayed')),
3012 3012 ('M', 'no-merges', None, _('do not show merges')),
3013 3013 ] + templateopts
3014 3014
3015 3015 diffopts = [
3016 3016 ('a', 'text', None, _('treat all files as text')),
3017 3017 ('g', 'git', None, _('use git extended diff format')),
3018 3018 ('', 'nodates', None, _("don't include dates in diff headers"))
3019 3019 ]
3020 3020
3021 3021 diffopts2 = [
3022 3022 ('p', 'show-function', None, _('show which function each change is in')),
3023 3023 ('w', 'ignore-all-space', None,
3024 3024 _('ignore white space when comparing lines')),
3025 3025 ('b', 'ignore-space-change', None,
3026 3026 _('ignore changes in the amount of white space')),
3027 3027 ('B', 'ignore-blank-lines', None,
3028 3028 _('ignore changes whose lines are all blank')),
3029 3029 ('U', 'unified', '', _('number of lines of context to show'))
3030 3030 ]
3031 3031
3032 3032 similarityopts = [
3033 3033 ('s', 'similarity', '',
3034 3034 _('guess renamed files by similarity (0<=s<=100)'))
3035 3035 ]
3036 3036
3037 3037 table = {
3038 3038 "^add": (add, walkopts + dryrunopts, _('[OPTION]... [FILE]...')),
3039 3039 "addremove":
3040 3040 (addremove, similarityopts + walkopts + dryrunopts,
3041 3041 _('[OPTION]... [FILE]...')),
3042 3042 "^annotate|blame":
3043 3043 (annotate,
3044 3044 [('r', 'rev', '', _('annotate the specified revision')),
3045 3045 ('f', 'follow', None, _('follow file copies and renames')),
3046 3046 ('a', 'text', None, _('treat all files as text')),
3047 3047 ('u', 'user', None, _('list the author (long with -v)')),
3048 3048 ('d', 'date', None, _('list the date (short with -q)')),
3049 3049 ('n', 'number', None, _('list the revision number (default)')),
3050 3050 ('c', 'changeset', None, _('list the changeset')),
3051 3051 ('l', 'line-number', None,
3052 3052 _('show line number at the first appearance'))
3053 3053 ] + walkopts,
3054 3054 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
3055 3055 "archive":
3056 3056 (archive,
3057 3057 [('', 'no-decode', None, _('do not pass files through decoders')),
3058 3058 ('p', 'prefix', '', _('directory prefix for files in archive')),
3059 3059 ('r', 'rev', '', _('revision to distribute')),
3060 3060 ('t', 'type', '', _('type of distribution to create')),
3061 3061 ] + walkopts,
3062 3062 _('[OPTION]... DEST')),
3063 3063 "backout":
3064 3064 (backout,
3065 3065 [('', 'merge', None,
3066 3066 _('merge with old dirstate parent after backout')),
3067 3067 ('', 'parent', '', _('parent to choose when backing out merge')),
3068 3068 ('r', 'rev', '', _('revision to backout')),
3069 3069 ] + walkopts + commitopts + commitopts2,
3070 3070 _('[OPTION]... [-r] REV')),
3071 3071 "bisect":
3072 3072 (bisect,
3073 3073 [('r', 'reset', False, _('reset bisect state')),
3074 3074 ('g', 'good', False, _('mark changeset good')),
3075 3075 ('b', 'bad', False, _('mark changeset bad')),
3076 3076 ('s', 'skip', False, _('skip testing changeset')),
3077 3077 ('c', 'command', '', _('use command to check changeset state')),
3078 3078 ('U', 'noupdate', False, _('do not update to target'))],
3079 3079 _("[-gbsr] [-c CMD] [REV]")),
3080 3080 "branch":
3081 3081 (branch,
3082 3082 [('f', 'force', None,
3083 3083 _('set branch name even if it shadows an existing branch')),
3084 3084 ('C', 'clean', None, _('reset branch name to parent branch name'))],
3085 3085 _('[-fC] [NAME]')),
3086 3086 "branches":
3087 3087 (branches,
3088 3088 [('a', 'active', False,
3089 3089 _('show only branches that have unmerged heads'))],
3090 3090 _('[-a]')),
3091 3091 "bundle":
3092 3092 (bundle,
3093 3093 [('f', 'force', None,
3094 3094 _('run even when remote repository is unrelated')),
3095 3095 ('r', 'rev', [],
3096 3096 _('a changeset up to which you would like to bundle')),
3097 3097 ('', 'base', [],
3098 3098 _('a base changeset to specify instead of a destination')),
3099 3099 ('a', 'all', None, _('bundle all changesets in the repository')),
3100 3100 ('t', 'type', 'bzip2', _('bundle compression type to use')),
3101 3101 ] + remoteopts,
3102 3102 _('[-f] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
3103 3103 "cat":
3104 3104 (cat,
3105 3105 [('o', 'output', '', _('print output to file with formatted name')),
3106 3106 ('r', 'rev', '', _('print the given revision')),
3107 3107 ('', 'decode', None, _('apply any matching decode filter')),
3108 3108 ] + walkopts,
3109 3109 _('[OPTION]... FILE...')),
3110 3110 "^clone":
3111 3111 (clone,
3112 3112 [('U', 'noupdate', None,
3113 3113 _('the clone will only contain a repository (no working copy)')),
3114 3114 ('r', 'rev', [],
3115 3115 _('a changeset you would like to have after cloning')),
3116 3116 ('', 'pull', None, _('use pull protocol to copy metadata')),
3117 3117 ('', 'uncompressed', None,
3118 3118 _('use uncompressed transfer (fast over LAN)')),
3119 3119 ] + remoteopts,
3120 3120 _('[OPTION]... SOURCE [DEST]')),
3121 3121 "^commit|ci":
3122 3122 (commit,
3123 3123 [('A', 'addremove', None,
3124 3124 _('mark new/missing files as added/removed before committing')),
3125 3125 ('', 'close-branch', None,
3126 3126 _('mark a branch as closed, hiding it from the branch list')),
3127 3127 ] + walkopts + commitopts + commitopts2,
3128 3128 _('[OPTION]... [FILE]...')),
3129 3129 "copy|cp":
3130 3130 (copy,
3131 3131 [('A', 'after', None, _('record a copy that has already occurred')),
3132 3132 ('f', 'force', None,
3133 3133 _('forcibly copy over an existing managed file')),
3134 3134 ] + walkopts + dryrunopts,
3135 3135 _('[OPTION]... [SOURCE]... DEST')),
3136 3136 "debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')),
3137 3137 "debugcheckstate": (debugcheckstate, []),
3138 3138 "debugcomplete":
3139 3139 (debugcomplete,
3140 3140 [('o', 'options', None, _('show the command options'))],
3141 3141 _('[-o] CMD')),
3142 3142 "debugdate":
3143 3143 (debugdate,
3144 3144 [('e', 'extended', None, _('try extended date formats'))],
3145 3145 _('[-e] DATE [RANGE]')),
3146 3146 "debugdata": (debugdata, [], _('FILE REV')),
3147 3147 "debugfsinfo": (debugfsinfo, [], _('[PATH]')),
3148 3148 "debugindex": (debugindex, [], _('FILE')),
3149 3149 "debugindexdot": (debugindexdot, [], _('FILE')),
3150 3150 "debuginstall": (debuginstall, []),
3151 3151 "debugrawcommit|rawcommit":
3152 3152 (rawcommit,
3153 3153 [('p', 'parent', [], _('parent')),
3154 3154 ('F', 'files', '', _('file list'))
3155 3155 ] + commitopts + commitopts2,
3156 3156 _('[OPTION]... [FILE]...')),
3157 3157 "debugrebuildstate":
3158 3158 (debugrebuildstate,
3159 3159 [('r', 'rev', '', _('revision to rebuild to'))],
3160 3160 _('[-r REV] [REV]')),
3161 3161 "debugrename":
3162 3162 (debugrename,
3163 3163 [('r', 'rev', '', _('revision to debug'))],
3164 3164 _('[-r REV] FILE')),
3165 3165 "debugsetparents":
3166 3166 (debugsetparents, [], _('REV1 [REV2]')),
3167 3167 "debugstate":
3168 3168 (debugstate,
3169 3169 [('', 'nodates', None, _('do not display the saved mtime'))],
3170 3170 _('[OPTION]...')),
3171 3171 "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')),
3172 3172 "^diff":
3173 3173 (diff,
3174 3174 [('r', 'rev', [], _('revision')),
3175 3175 ('c', 'change', '', _('change made by revision'))
3176 3176 ] + diffopts + diffopts2 + walkopts,
3177 3177 _('[OPTION]... [-r REV1 [-r REV2]] [FILE]...')),
3178 3178 "^export":
3179 3179 (export,
3180 3180 [('o', 'output', '', _('print output to file with formatted name')),
3181 3181 ('', 'switch-parent', None, _('diff against the second parent'))
3182 3182 ] + diffopts,
3183 3183 _('[OPTION]... [-o OUTFILESPEC] REV...')),
3184 3184 "grep":
3185 3185 (grep,
3186 3186 [('0', 'print0', None, _('end fields with NUL')),
3187 3187 ('', 'all', None, _('print all revisions that match')),
3188 3188 ('f', 'follow', None,
3189 3189 _('follow changeset history, or file history across copies and renames')),
3190 3190 ('i', 'ignore-case', None, _('ignore case when matching')),
3191 3191 ('l', 'files-with-matches', None,
3192 3192 _('print only filenames and revs that match')),
3193 3193 ('n', 'line-number', None, _('print matching line numbers')),
3194 3194 ('r', 'rev', [], _('search in given revision range')),
3195 3195 ('u', 'user', None, _('list the author (long with -v)')),
3196 3196 ('d', 'date', None, _('list the date (short with -q)')),
3197 3197 ] + walkopts,
3198 3198 _('[OPTION]... PATTERN [FILE]...')),
3199 3199 "heads":
3200 3200 (heads,
3201 3201 [('r', 'rev', '', _('show only heads which are descendants of rev')),
3202 3202 ('a', 'active', False,
3203 3203 _('show only the active heads from open branches')),
3204 3204 ] + templateopts,
3205 3205 _('[-r REV] [REV]...')),
3206 3206 "help": (help_, [], _('[TOPIC]')),
3207 3207 "identify|id":
3208 3208 (identify,
3209 3209 [('r', 'rev', '', _('identify the specified rev')),
3210 3210 ('n', 'num', None, _('show local revision number')),
3211 3211 ('i', 'id', None, _('show global revision id')),
3212 3212 ('b', 'branch', None, _('show branch')),
3213 3213 ('t', 'tags', None, _('show tags'))],
3214 3214 _('[-nibt] [-r REV] [SOURCE]')),
3215 3215 "import|patch":
3216 3216 (import_,
3217 3217 [('p', 'strip', 1,
3218 3218 _('directory strip option for patch. This has the same\n'
3219 3219 'meaning as the corresponding patch option')),
3220 3220 ('b', 'base', '', _('base path')),
3221 3221 ('f', 'force', None,
3222 3222 _('skip check for outstanding uncommitted changes')),
3223 3223 ('', 'no-commit', None, _("don't commit, just update the working directory")),
3224 3224 ('', 'exact', None,
3225 3225 _('apply patch to the nodes from which it was generated')),
3226 3226 ('', 'import-branch', None,
3227 3227 _('use any branch information in patch (implied by --exact)'))] +
3228 3228 commitopts + commitopts2 + similarityopts,
3229 3229 _('[OPTION]... PATCH...')),
3230 3230 "incoming|in":
3231 3231 (incoming,
3232 3232 [('f', 'force', None,
3233 3233 _('run even when remote repository is unrelated')),
3234 3234 ('n', 'newest-first', None, _('show newest record first')),
3235 3235 ('', 'bundle', '', _('file to store the bundles into')),
3236 3236 ('r', 'rev', [],
3237 3237 _('a specific revision up to which you would like to pull')),
3238 3238 ] + logopts + remoteopts,
3239 3239 _('[-p] [-n] [-M] [-f] [-r REV]...'
3240 3240 ' [--bundle FILENAME] [SOURCE]')),
3241 3241 "^init":
3242 3242 (init,
3243 3243 remoteopts,
3244 3244 _('[-e CMD] [--remotecmd CMD] [DEST]')),
3245 3245 "locate":
3246 3246 (locate,
3247 3247 [('r', 'rev', '', _('search the repository as it stood at rev')),
3248 3248 ('0', 'print0', None,
3249 3249 _('end filenames with NUL, for use with xargs')),
3250 3250 ('f', 'fullpath', None,
3251 3251 _('print complete paths from the filesystem root')),
3252 3252 ] + walkopts,
3253 3253 _('[OPTION]... [PATTERN]...')),
3254 3254 "^log|history":
3255 3255 (log,
3256 3256 [('f', 'follow', None,
3257 3257 _('follow changeset history, or file history across copies and renames')),
3258 3258 ('', 'follow-first', None,
3259 3259 _('only follow the first parent of merge changesets')),
3260 3260 ('d', 'date', '', _('show revs matching date spec')),
3261 3261 ('C', 'copies', None, _('show copied files')),
3262 3262 ('k', 'keyword', [], _('do case-insensitive search for a keyword')),
3263 3263 ('r', 'rev', [], _('show the specified revision or range')),
3264 3264 ('', 'removed', None, _('include revs where files were removed')),
3265 3265 ('m', 'only-merges', None, _('show only merges')),
3266 3266 ('u', 'user', [], _('revs committed by user')),
3267 3267 ('b', 'only-branch', [],
3268 3268 _('show only changesets within the given named branch')),
3269 3269 ('P', 'prune', [], _('do not display revision or any of its ancestors')),
3270 3270 ] + logopts + walkopts,
3271 3271 _('[OPTION]... [FILE]')),
3272 3272 "manifest":
3273 3273 (manifest,
3274 3274 [('r', 'rev', '', _('revision to display'))],
3275 3275 _('[-r REV]')),
3276 3276 "^merge":
3277 3277 (merge,
3278 3278 [('f', 'force', None, _('force a merge with outstanding changes')),
3279 3279 ('r', 'rev', '', _('revision to merge')),
3280 3280 ],
3281 3281 _('[-f] [[-r] REV]')),
3282 3282 "outgoing|out":
3283 3283 (outgoing,
3284 3284 [('f', 'force', None,
3285 3285 _('run even when remote repository is unrelated')),
3286 3286 ('r', 'rev', [],
3287 3287 _('a specific revision up to which you would like to push')),
3288 3288 ('n', 'newest-first', None, _('show newest record first')),
3289 3289 ] + logopts + remoteopts,
3290 3290 _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
3291 3291 "^parents":
3292 3292 (parents,
3293 3293 [('r', 'rev', '', _('show parents from the specified rev')),
3294 3294 ] + templateopts,
3295 3295 _('hg parents [-r REV] [FILE]')),
3296 3296 "paths": (paths, [], _('[NAME]')),
3297 3297 "^pull":
3298 3298 (pull,
3299 3299 [('u', 'update', None,
3300 3300 _('update to new tip if changesets were pulled')),
3301 3301 ('f', 'force', None,
3302 3302 _('run even when remote repository is unrelated')),
3303 3303 ('r', 'rev', [],
3304 3304 _('a specific revision up to which you would like to pull')),
3305 3305 ] + remoteopts,
3306 3306 _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
3307 3307 "^push":
3308 3308 (push,
3309 3309 [('f', 'force', None, _('force push')),
3310 3310 ('r', 'rev', [],
3311 3311 _('a specific revision up to which you would like to push')),
3312 3312 ] + remoteopts,
3313 3313 _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
3314 3314 "recover": (recover, []),
3315 3315 "^remove|rm":
3316 3316 (remove,
3317 3317 [('A', 'after', None, _('record delete for missing files')),
3318 3318 ('f', 'force', None,
3319 3319 _('remove (and delete) file even if added or modified')),
3320 3320 ] + walkopts,
3321 3321 _('[OPTION]... FILE...')),
3322 3322 "rename|mv":
3323 3323 (rename,
3324 3324 [('A', 'after', None, _('record a rename that has already occurred')),
3325 3325 ('f', 'force', None,
3326 3326 _('forcibly copy over an existing managed file')),
3327 3327 ] + walkopts + dryrunopts,
3328 3328 _('[OPTION]... SOURCE... DEST')),
3329 3329 "resolve":
3330 3330 (resolve,
3331 3331 [('a', 'all', None, _('remerge all unresolved files')),
3332 3332 ('l', 'list', None, _('list state of files needing merge')),
3333 3333 ('m', 'mark', None, _('mark files as resolved')),
3334 3334 ('u', 'unmark', None, _('unmark files as resolved'))]
3335 3335 + walkopts,
3336 3336 _('[OPTION]... [FILE]...')),
3337 3337 "revert":
3338 3338 (revert,
3339 3339 [('a', 'all', None, _('revert all changes when no arguments given')),
3340 3340 ('d', 'date', '', _('tipmost revision matching date')),
3341 3341 ('r', 'rev', '', _('revision to revert to')),
3342 3342 ('', 'no-backup', None, _('do not save backup copies of files')),
3343 3343 ] + walkopts + dryrunopts,
3344 3344 _('[OPTION]... [-r REV] [NAME]...')),
3345 3345 "rollback": (rollback, []),
3346 3346 "root": (root, []),
3347 3347 "^serve":
3348 3348 (serve,
3349 3349 [('A', 'accesslog', '', _('name of access log file to write to')),
3350 3350 ('d', 'daemon', None, _('run server in background')),
3351 3351 ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
3352 3352 ('E', 'errorlog', '', _('name of error log file to write to')),
3353 3353 ('p', 'port', 0, _('port to listen on (default: 8000)')),
3354 3354 ('a', 'address', '', _('address to listen on (default: all interfaces)')),
3355 3355 ('', 'prefix', '', _('prefix path to serve from (default: server root)')),
3356 3356 ('n', 'name', '',
3357 3357 _('name to show in web pages (default: working dir)')),
3358 3358 ('', 'webdir-conf', '', _('name of the webdir config file'
3359 3359 ' (serve more than one repo)')),
3360 3360 ('', 'pid-file', '', _('name of file to write process ID to')),
3361 3361 ('', 'stdio', None, _('for remote clients')),
3362 3362 ('t', 'templates', '', _('web templates to use')),
3363 3363 ('', 'style', '', _('template style to use')),
3364 3364 ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
3365 3365 ('', 'certificate', '', _('SSL certificate file'))],
3366 3366 _('[OPTION]...')),
3367 3367 "showconfig|debugconfig":
3368 3368 (showconfig,
3369 3369 [('u', 'untrusted', None, _('show untrusted configuration options'))],
3370 3370 _('[-u] [NAME]...')),
3371 3371 "^status|st":
3372 3372 (status,
3373 3373 [('A', 'all', None, _('show status of all files')),
3374 3374 ('m', 'modified', None, _('show only modified files')),
3375 3375 ('a', 'added', None, _('show only added files')),
3376 3376 ('r', 'removed', None, _('show only removed files')),
3377 3377 ('d', 'deleted', None, _('show only deleted (but tracked) files')),
3378 3378 ('c', 'clean', None, _('show only files without changes')),
3379 3379 ('u', 'unknown', None, _('show only unknown (not tracked) files')),
3380 3380 ('i', 'ignored', None, _('show only ignored files')),
3381 3381 ('n', 'no-status', None, _('hide status prefix')),
3382 3382 ('C', 'copies', None, _('show source of copied files')),
3383 3383 ('0', 'print0', None,
3384 3384 _('end filenames with NUL, for use with xargs')),
3385 3385 ('', 'rev', [], _('show difference from revision')),
3386 3386 ] + walkopts,
3387 3387 _('[OPTION]... [FILE]...')),
3388 3388 "tag":
3389 3389 (tag,
3390 3390 [('f', 'force', None, _('replace existing tag')),
3391 3391 ('l', 'local', None, _('make the tag local')),
3392 3392 ('r', 'rev', '', _('revision to tag')),
3393 3393 ('', 'remove', None, _('remove a tag')),
3394 3394 # -l/--local is already there, commitopts cannot be used
3395 3395 ('m', 'message', '', _('use <text> as commit message')),
3396 3396 ] + commitopts2,
3397 3397 _('[-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
3398 3398 "tags": (tags, []),
3399 3399 "tip":
3400 3400 (tip,
3401 3401 [('p', 'patch', None, _('show patch')),
3402 3402 ('g', 'git', None, _('use git extended diff format')),
3403 3403 ] + templateopts,
3404 3404 _('[-p]')),
3405 3405 "unbundle":
3406 3406 (unbundle,
3407 3407 [('u', 'update', None,
3408 3408 _('update to new tip if changesets were unbundled'))],
3409 3409 _('[-u] FILE...')),
3410 3410 "^update|up|checkout|co":
3411 3411 (update,
3412 3412 [('C', 'clean', None, _('overwrite locally modified files (no backup)')),
3413 3413 ('d', 'date', '', _('tipmost revision matching date')),
3414 3414 ('r', 'rev', '', _('revision'))],
3415 3415 _('[-C] [-d DATE] [[-r] REV]')),
3416 3416 "verify": (verify, []),
3417 3417 "version": (version_, []),
3418 3418 }
3419 3419
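# Layout sketch of the command table above: each key is "name|alias..."
# mapping to (function, option list, synopsis string); a leading "^"
# marks a command shown in the short 'hg help' listing, and the
# norepo/optionalrepo strings below name commands that run without, or
# with an optional, local repository.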
3420 3420 norepo = ("clone init version help debugcomplete debugdata"
3421 3421 " debugindex debugindexdot debugdate debuginstall debugfsinfo")
3422 3422 optionalrepo = ("identify paths serve showconfig debugancestor")
@@ -1,806 +1,806 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import nullid, nullrev, short, hex
9 9 from i18n import _
10 10 import ancestor, bdiff, error, util, os, errno
11 11
12 12 class propertycache(object):
13 13 def __init__(self, func):
14 14 self.func = func
15 15 self.name = func.__name__
16 16 def __get__(self, obj, type=None):
17 17 result = self.func(obj)
18 18 setattr(obj, self.name, result)
19 19 return result
20 20
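# A small usage sketch of propertycache above: it is a non-data
# descriptor, so the first access computes the value and setattr()
# shadows the descriptor with the result on the instance; later reads
# are plain attribute lookups with no recomputation.
class _demo(object):
    def _answer(self):
        print "computed"            # runs only on the first access
        return 42
    _answer = propertycache(_answer)

# d = _demo()
# d._answer   # prints "computed", yields 42
# d._answer   # cached: yields 42 without recomputing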
21 21 class changectx(object):
22 22 """A changecontext object makes access to data related to a particular
23 23 changeset convenient."""
24 24 def __init__(self, repo, changeid=''):
25 25 """changeid is a revision number, node, or tag"""
26 26 if changeid == '':
27 27 changeid = '.'
28 28 self._repo = repo
29 29 if isinstance(changeid, (long, int)):
30 30 self._rev = changeid
31 31 self._node = self._repo.changelog.node(changeid)
32 32 else:
33 33 self._node = self._repo.lookup(changeid)
34 34 self._rev = self._repo.changelog.rev(self._node)
35 35
36 36 def __str__(self):
37 37 return short(self.node())
38 38
39 39 def __int__(self):
40 40 return self.rev()
41 41
42 42 def __repr__(self):
43 43 return "<changectx %s>" % str(self)
44 44
45 45 def __hash__(self):
46 46 try:
47 47 return hash(self._rev)
48 48 except AttributeError:
49 49 return id(self)
50 50
51 51 def __eq__(self, other):
52 52 try:
53 53 return self._rev == other._rev
54 54 except AttributeError:
55 55 return False
56 56
57 57 def __ne__(self, other):
58 58 return not (self == other)
59 59
60 60 def __nonzero__(self):
61 61 return self._rev != nullrev
62 62
63 63 def _changeset(self):
64 64 return self._repo.changelog.read(self.node())
65 65 _changeset = propertycache(_changeset)
66 66
67 67 def _manifest(self):
68 68 return self._repo.manifest.read(self._changeset[0])
69 69 _manifest = propertycache(_manifest)
70 70
71 71 def _manifestdelta(self):
72 72 return self._repo.manifest.readdelta(self._changeset[0])
73 73 _manifestdelta = propertycache(_manifestdelta)
74 74
75 75 def _parents(self):
76 76 p = self._repo.changelog.parentrevs(self._rev)
77 77 if p[1] == nullrev:
78 78 p = p[:-1]
79 79 return [changectx(self._repo, x) for x in p]
80 80 _parents = propertycache(_parents)
81 81
82 82 def __contains__(self, key):
83 83 return key in self._manifest
84 84
85 85 def __getitem__(self, key):
86 86 return self.filectx(key)
87 87
88 88 def __iter__(self):
89 89 for f in util.sort(self._manifest):
90 90 yield f
91 91
92 92 def changeset(self): return self._changeset
93 93 def manifest(self): return self._manifest
94 94
95 95 def rev(self): return self._rev
96 96 def node(self): return self._node
97 97 def hex(self): return hex(self._node)
98 98 def user(self): return self._changeset[1]
99 99 def date(self): return self._changeset[2]
100 100 def files(self): return self._changeset[3]
101 101 def description(self): return self._changeset[4]
102 102 def branch(self): return self._changeset[5].get("branch")
103 103 def extra(self): return self._changeset[5]
104 104 def tags(self): return self._repo.nodetags(self._node)
105 105
106 106 def parents(self):
107 107 """return contexts for each parent changeset"""
108 108 return self._parents
109 109
110 110 def children(self):
111 111 """return contexts for each child changeset"""
112 112 c = self._repo.changelog.children(self._node)
113 113 return [changectx(self._repo, x) for x in c]
114 114
115 115 def ancestors(self):
116 116 for a in self._repo.changelog.ancestors(self._rev):
117 117 yield changectx(self._repo, a)
118 118
119 119 def descendants(self):
120 120 for d in self._repo.changelog.descendants(self._rev):
121 121 yield changectx(self._repo, d)
122 122
123 123 def _fileinfo(self, path):
124 124 if '_manifest' in self.__dict__:
125 125 try:
126 126 return self._manifest[path], self._manifest.flags(path)
127 127 except KeyError:
128 128 raise error.LookupError(self._node, path,
129 129 _('not found in manifest'))
130 130 if '_manifestdelta' in self.__dict__ or path in self.files():
131 131 if path in self._manifestdelta:
132 132 return self._manifestdelta[path], self._manifestdelta.flags(path)
133 133 node, flag = self._repo.manifest.find(self._changeset[0], path)
134 134 if not node:
135 135 raise error.LookupError(self._node, path,
136 136 _('not found in manifest'))
137 137
138 138 return node, flag
139 139
140 140 def filenode(self, path):
141 141 return self._fileinfo(path)[0]
142 142
143 143 def flags(self, path):
144 144 try:
145 145 return self._fileinfo(path)[1]
146 146 except error.LookupError:
147 147 return ''
148 148
149 149 def filectx(self, path, fileid=None, filelog=None):
150 150 """get a file context from this changeset"""
151 151 if fileid is None:
152 152 fileid = self.filenode(path)
153 153 return filectx(self._repo, path, fileid=fileid,
154 154 changectx=self, filelog=filelog)
155 155
156 156 def ancestor(self, c2):
157 157 """
158 158 return the ancestor context of self and c2
159 159 """
160 160 n = self._repo.changelog.ancestor(self._node, c2._node)
161 161 return changectx(self._repo, n)
162 162
163 163 def walk(self, match):
164 164 fdict = dict.fromkeys(match.files())
165 165 # for dirstate.walk, files=['.'] means "walk the whole tree".
166 166 # follow that here, too
167 167 fdict.pop('.', None)
168 168 for fn in self:
169 169 for ffn in fdict:
170 170 # match if the file is the exact name or a directory
171 171 if ffn == fn or fn.startswith("%s/" % ffn):
172 172 del fdict[ffn]
173 173 break
174 174 if match(fn):
175 175 yield fn
176 176 for fn in util.sort(fdict):
177 177 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
178 178 yield fn
179 179
180 180 class filectx(object):
181 181 """A filecontext object makes access to data related to a particular
182 182 filerevision convenient."""
183 183 def __init__(self, repo, path, changeid=None, fileid=None,
184 184 filelog=None, changectx=None):
185 185 """changeid can be a changeset revision, node, or tag.
186 186 fileid can be a file revision or node."""
187 187 self._repo = repo
188 188 self._path = path
189 189
190 190 assert (changeid is not None
191 191 or fileid is not None
192 192 or changectx is not None)
193 193
194 194 if filelog:
195 195 self._filelog = filelog
196 196
197 197 if changeid is not None:
198 198 self._changeid = changeid
199 199 if changectx is not None:
200 200 self._changectx = changectx
201 201 if fileid is not None:
202 202 self._fileid = fileid
203 203
204 204 def _changectx(self):
205 205 return changectx(self._repo, self._changeid)
206 206 _changectx = propertycache(_changectx)
207 207
208 208 def _filelog(self):
209 209 return self._repo.file(self._path)
210 210 _filelog = propertycache(_filelog)
211 211
212 212 def _changeid(self):
213 213 if '_changectx' in self.__dict__:
214 214 return self._changectx.rev()
215 215 else:
216 216 return self._filelog.linkrev(self._filerev)
217 217 _changeid = propertycache(_changeid)
218 218
219 219 def _filenode(self):
220 220 if '_fileid' in self.__dict__:
221 221 return self._filelog.lookup(self._fileid)
222 222 else:
223 223 return self._changectx.filenode(self._path)
224 224 _filenode = propertycache(_filenode)
225 225
226 226 def _filerev(self):
227 227 return self._filelog.rev(self._filenode)
228 228 _filerev = propertycache(_filerev)
229 229
230 230 def _repopath(self):
231 231 return self._path
232 232 _repopath = propertycache(_repopath)
233 233
234 234 def __nonzero__(self):
235 235 try:
236 n = self._filenode
236 self._filenode
237 237 return True
238 238 except error.LookupError:
239 239 # file is missing
240 240 return False
241 241
242 242 def __str__(self):
243 243 return "%s@%s" % (self.path(), short(self.node()))
244 244
245 245 def __repr__(self):
246 246 return "<filectx %s>" % str(self)
247 247
248 248 def __hash__(self):
249 249 try:
250 250 return hash((self._path, self._fileid))
251 251 except AttributeError:
252 252 return id(self)
253 253
254 254 def __eq__(self, other):
255 255 try:
256 256 return (self._path == other._path
257 257 and self._fileid == other._fileid)
258 258 except AttributeError:
259 259 return False
260 260
261 261 def __ne__(self, other):
262 262 return not (self == other)
263 263
264 264 def filectx(self, fileid):
265 265 '''opens an arbitrary revision of the file without
266 266 opening a new filelog'''
267 267 return filectx(self._repo, self._path, fileid=fileid,
268 268 filelog=self._filelog)
269 269
270 270 def filerev(self): return self._filerev
271 271 def filenode(self): return self._filenode
272 272 def flags(self): return self._changectx.flags(self._path)
273 273 def filelog(self): return self._filelog
274 274
275 275 def rev(self):
276 276 if '_changectx' in self.__dict__:
277 277 return self._changectx.rev()
278 278 if '_changeid' in self.__dict__:
279 279 return self._changectx.rev()
280 280 return self._filelog.linkrev(self._filerev)
281 281
282 282 def linkrev(self): return self._filelog.linkrev(self._filerev)
283 283 def node(self): return self._changectx.node()
284 284 def user(self): return self._changectx.user()
285 285 def date(self): return self._changectx.date()
286 286 def files(self): return self._changectx.files()
287 287 def description(self): return self._changectx.description()
288 288 def branch(self): return self._changectx.branch()
289 289 def manifest(self): return self._changectx.manifest()
290 290 def changectx(self): return self._changectx
291 291
292 292 def data(self): return self._filelog.read(self._filenode)
293 293 def path(self): return self._path
294 294 def size(self): return self._filelog.size(self._filerev)
295 295
296 296 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
297 297
298 298 def renamed(self):
299 299 """check if file was actually renamed in this changeset revision
300 300
301 301 If a rename is logged in the file revision, we report the copy for
302 302 the changeset only if the file revision's linkrev points back to the
303 303 changeset in question or both parents contain different file revisions.
304 304 """
305 305
306 306 renamed = self._filelog.renamed(self._filenode)
307 307 if not renamed:
308 308 return renamed
309 309
310 310 if self.rev() == self.linkrev():
311 311 return renamed
312 312
313 313 name = self.path()
314 314 fnode = self._filenode
315 315 for p in self._changectx.parents():
316 316 try:
317 317 if fnode == p.filenode(name):
318 318 return None
319 319 except error.LookupError:
320 320 pass
321 321 return renamed
322 322
323 323 def parents(self):
324 324 p = self._path
325 325 fl = self._filelog
326 326 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
327 327
328 328 r = self._filelog.renamed(self._filenode)
329 329 if r:
330 330 pl[0] = (r[0], r[1], None)
331 331
332 332 return [filectx(self._repo, p, fileid=n, filelog=l)
333 333 for p,n,l in pl if n != nullid]
334 334
335 335 def children(self):
336 336 # hard for renames
337 337 c = self._filelog.children(self._filenode)
338 338 return [filectx(self._repo, self._path, fileid=x,
339 339 filelog=self._filelog) for x in c]
340 340
341 341 def annotate(self, follow=False, linenumber=None):
342 342 '''returns a list of tuples of (ctx, line) for each line
343 343 in the file, where ctx is the filectx of the node where
344 344 that line was last changed.
345 345 If the "linenumber" parameter is not None, tuples of
346 346 ((ctx, linenumber), line) are returned instead, where
347 347 linenumber is the position the line had when it first
348 348 appeared in the managed file.
349 349 To reduce annotation cost, if "linenumber" is False,
350 350 the fixed value False is used as the linenumber rather
351 351 than computing real line numbers.'''
352 352
353 353 def decorate_compat(text, rev):
354 354 return ([rev] * len(text.splitlines()), text)
355 355
356 356 def without_linenumber(text, rev):
357 357 return ([(rev, False)] * len(text.splitlines()), text)
358 358
359 359 def with_linenumber(text, rev):
360 360 size = len(text.splitlines())
361 361 return ([(rev, i) for i in xrange(1, size + 1)], text)
362 362
363 363 decorate = (((linenumber is None) and decorate_compat) or
364 364 (linenumber and with_linenumber) or
365 365 without_linenumber)
366 366
367 367 def pair(parent, child):
368 368 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
369 369 child[0][b1:b2] = parent[0][a1:a2]
370 370 return child
371 371
372 372 getlog = util.cachefunc(lambda x: self._repo.file(x))
373 373 def getctx(path, fileid):
374 374 log = path == self._path and self._filelog or getlog(path)
375 375 return filectx(self._repo, path, fileid=fileid, filelog=log)
376 376 getctx = util.cachefunc(getctx)
377 377
378 378 def parents(f):
379 379 # we want to reuse filectx objects as much as possible
380 380 p = f._path
381 381 if f._filerev is None: # working dir
382 382 pl = [(n.path(), n.filerev()) for n in f.parents()]
383 383 else:
384 384 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
385 385
386 386 if follow:
387 387 r = f.renamed()
388 388 if r:
389 389 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
390 390
391 391 return [getctx(p, n) for p, n in pl if n != nullrev]
392 392
393 393 # use linkrev to find the first changeset where self appeared
394 394 if self.rev() != self.linkrev():
395 395 base = self.filectx(self.filerev())
396 396 else:
397 397 base = self
398 398
399 399 # find all ancestors
400 400 needed = {base: 1}
401 401 visit = [base]
402 402 files = [base._path]
403 403 while visit:
404 404 f = visit.pop(0)
405 405 for p in parents(f):
406 406 if p not in needed:
407 407 needed[p] = 1
408 408 visit.append(p)
409 409 if p._path not in files:
410 410 files.append(p._path)
411 411 else:
412 412 # count how many times we'll use this
413 413 needed[p] += 1
414 414
415 415 # sort by revision (per file) which is a topological order
416 416 visit = []
417 417 for f in files:
418 418 fn = [(n.rev(), n) for n in needed if n._path == f]
419 419 visit.extend(fn)
420 420
421 421 hist = {}
422 422 for r, f in util.sort(visit):
423 423 curr = decorate(f.data(), f)
424 424 for p in parents(f):
425 425 if p != nullid:
426 426 curr = pair(hist[p], curr)
427 427 # trim the history of unneeded revs
428 428 needed[p] -= 1
429 429 if not needed[p]:
430 430 del hist[p]
431 431 hist[f] = curr
432 432
433 433 return zip(hist[f][0], hist[f][1].splitlines(1))
434 434
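The decorate/pair machinery above is the core of annotate: every revision's text is tagged line by line with the context that introduced it, and diff blocks against each parent let unchanged lines inherit the parent's tags. A self-contained toy sketch of the same technique, with difflib standing in for bdiff and small integers standing in for filectx objects (not Mercurial's API):

    import difflib

    def decorate(text, rev):
        # tag every line of text with rev
        return ([rev] * len(text.splitlines()), text)

    def pair(parent, child):
        a = parent[1].splitlines()
        b = child[1].splitlines()
        for a1, b1, size in difflib.SequenceMatcher(
                None, a, b).get_matching_blocks():
            # unchanged lines were introduced no later than the parent,
            # so they inherit the parent's tags
            child[0][b1:b1 + size] = parent[0][a1:a1 + size]
        return child

    parent = decorate("a\nb\nc\n", 1)
    child = pair(parent, decorate("a\nx\nc\n", 2))
    print zip(child[0], child[1].splitlines(True))
    # [(1, 'a\n'), (2, 'x\n'), (1, 'c\n')]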
435 435 def ancestor(self, fc2):
436 436 """
437 437 find the common ancestor file context, if any, of self, and fc2
438 438 """
439 439
440 440 acache = {}
441 441
442 442 # prime the ancestor cache for the working directory
443 443 for c in (self, fc2):
444 444 if c._filerev is None:
445 445 pl = [(n.path(), n.filenode()) for n in c.parents()]
446 446 acache[(c._path, None)] = pl
447 447
448 448 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
449 449 def parents(vertex):
450 450 if vertex in acache:
451 451 return acache[vertex]
452 452 f, n = vertex
453 453 if f not in flcache:
454 454 flcache[f] = self._repo.file(f)
455 455 fl = flcache[f]
456 456 pl = [(f, p) for p in fl.parents(n) if p != nullid]
457 457 re = fl.renamed(n)
458 458 if re:
459 459 pl.append(re)
460 460 acache[vertex] = pl
461 461 return pl
462 462
463 463 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
464 464 v = ancestor.ancestor(a, b, parents)
465 465 if v:
466 466 f, n = v
467 467 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
468 468
469 469 return None
470 470
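ancestor() above feeds a parents callback over (path, filenode) vertices into the generic ancestor.ancestor routine. A toy, self-contained stand-in showing the shape of that contract (Mercurial's real algorithm uses generation numbers and is considerably smarter):

    def simple_ancestor(a, b, parents):
        # nearest vertex reachable from both a and b via parents(),
        # or None; breadth-first, suitable for small DAGs only
        def ancestors(x):
            seen, queue = set(), [x]
            while queue:
                v = queue.pop(0)
                if v not in seen:
                    seen.add(v)
                    queue.extend(parents(v))
            return seen
        common = ancestors(a) & ancestors(b)
        for v in common:
            # a common ancestor that no other common ancestor
            # descends from is a nearest one
            if not [c for c in common if c != v and v in ancestors(c)]:
                return v
        return None

    dag = {4: [2, 3], 3: [1], 2: [1], 1: []}
    print simple_ancestor(4, 3, dag.get)   # -> 3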
471 471 class workingctx(changectx):
472 472 """A workingctx object makes access to data related to
473 473 the current working directory convenient.
474 474 parents - a pair of parent nodeids, or None to use the dirstate.
475 475 date - any valid date string or (unixtime, offset), or None.
476 476 user - username string, or None.
477 477 extra - a dictionary of extra values, or None.
478 478 changes - a list of file lists as returned by localrepo.status()
479 479 or None to use the repository status.
480 480 """
481 481 def __init__(self, repo, parents=None, text="", user=None, date=None,
482 482 extra=None, changes=None):
483 483 self._repo = repo
484 484 self._rev = None
485 485 self._node = None
486 486 self._text = text
487 487 if date:
488 488 self._date = util.parsedate(date)
489 489 if user:
490 490 self._user = user
491 491 if parents:
492 492 self._parents = [changectx(self._repo, p) for p in parents]
493 493 if changes:
494 494 self._status = list(changes)
495 495
496 496 self._extra = {}
497 497 if extra:
498 498 self._extra = extra.copy()
499 499 if 'branch' not in self._extra:
500 500 branch = self._repo.dirstate.branch()
501 501 try:
502 502 branch = branch.decode('UTF-8').encode('UTF-8')
503 503 except UnicodeDecodeError:
504 504 raise util.Abort(_('branch name not in UTF-8!'))
505 505 self._extra['branch'] = branch
506 506 if self._extra['branch'] == '':
507 507 self._extra['branch'] = 'default'
508 508
509 509 def __str__(self):
510 510 return str(self._parents[0]) + "+"
511 511
512 512 def __nonzero__(self):
513 513 return True
514 514
515 515 def __contains__(self, key):
516 516 return self._repo.dirstate[key] not in "?r"
517 517
518 518 def _manifest(self):
519 519 """generate a manifest corresponding to the working directory"""
520 520
521 521 man = self._parents[0].manifest().copy()
522 522 copied = self._repo.dirstate.copies()
523 523 cf = lambda x: man.flags(copied.get(x, x))
524 524 ff = self._repo.dirstate.flagfunc(cf)
525 525 modified, added, removed, deleted, unknown = self._status[:5]
526 526 for i, l in (("a", added), ("m", modified), ("u", unknown)):
527 527 for f in l:
528 528 man[f] = man.get(copied.get(f, f), nullid) + i
529 529 try:
530 530 man.set(f, ff(f))
531 531 except OSError:
532 532 pass
533 533
534 534 for f in deleted + removed:
535 535 if f in man:
536 536 del man[f]
537 537
538 538 return man
539 539 _manifest = propertycache(_manifest)
540 540
541 541 def _status(self):
542 542 return self._repo.status(unknown=True)
543 543 _status = propertycache(_status)
544 544
545 545 def _user(self):
546 546 return self._repo.ui.username()
547 547 _user = propertycache(_user)
548 548
549 549 def _date(self):
550 550 return util.makedate()
551 551 _date = propertycache(_date)
552 552
553 553 def _parents(self):
554 554 p = self._repo.dirstate.parents()
555 555 if p[1] == nullid:
556 556 p = p[:-1]
557 557 self._parents = [changectx(self._repo, x) for x in p]
558 558 return self._parents
559 559 _parents = propertycache(_parents)
560 560
561 561 def manifest(self): return self._manifest
562 562
563 563 def user(self): return self._user or self._repo.ui.username()
564 564 def date(self): return self._date
565 565 def description(self): return self._text
566 566 def files(self):
567 567 return util.sort(self._status[0] + self._status[1] + self._status[2])
568 568
569 569 def modified(self): return self._status[0]
570 570 def added(self): return self._status[1]
571 571 def removed(self): return self._status[2]
572 572 def deleted(self): return self._status[3]
573 573 def unknown(self): return self._status[4]
574 574 def clean(self): return self._status[5]
575 575 def branch(self): return self._extra['branch']
576 576 def extra(self): return self._extra
577 577
578 578 def tags(self):
579 579 t = []
580 580 [t.extend(p.tags()) for p in self.parents()]
581 581 return t
582 582
583 583 def children(self):
584 584 return []
585 585
586 586 def flags(self, path):
587 587 if '_manifest' in self.__dict__:
588 588 try:
589 589 return self._manifest.flags(path)
590 590 except KeyError:
591 591 return ''
592 592
593 593 pnode = self._parents[0].changeset()[0]
594 594 orig = self._repo.dirstate.copies().get(path, path)
595 595 node, flag = self._repo.manifest.find(pnode, orig)
596 596 try:
597 597 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
598 598 return ff(path)
599 599 except OSError:
600 600 pass
601 601
602 602 if not node or path in self.deleted() or path in self.removed():
603 603 return ''
604 604 return flag
605 605
606 606 def filectx(self, path, filelog=None):
607 607 """get a file context from the working directory"""
608 608 return workingfilectx(self._repo, path, workingctx=self,
609 609 filelog=filelog)
610 610
611 611 def ancestor(self, c2):
612 612 """return the ancestor context of self and c2"""
613 613 return self._parents[0].ancestor(c2) # punt on two parents for now
614 614
615 615 def walk(self, match):
616 616 return util.sort(self._repo.dirstate.walk(match, True, False).keys())
617 617
618 618 class workingfilectx(filectx):
619 619 """A workingfilectx object makes access to data related to a particular
620 620 file in the working directory convenient."""
621 621 def __init__(self, repo, path, filelog=None, workingctx=None):
622 622 """changeid can be a changeset revision, node, or tag.
623 623 fileid can be a file revision or node."""
624 624 self._repo = repo
625 625 self._path = path
626 626 self._changeid = None
627 627 self._filerev = self._filenode = None
628 628
629 629 if filelog:
630 630 self._filelog = filelog
631 631 if workingctx:
632 632 self._changectx = workingctx
633 633
634 634 def _changectx(self):
635 635 return workingctx(self._repo)
636 636 _changectx = propertycache(_changectx)
637 637
638 638 def _repopath(self):
639 639 return self._repo.dirstate.copied(self._path) or self._path
640 640 _repopath = propertycache(_repopath)
641 641
642 642 def _filelog(self):
643 643 return self._repo.file(self._repopath)
644 644 _filelog = propertycache(_filelog)
645 645
646 646 def __nonzero__(self):
647 647 return True
648 648
649 649 def __str__(self):
650 650 return "%s@%s" % (self.path(), self._changectx)
651 651
652 652 def filectx(self, fileid):
653 653 '''opens an arbitrary revision of the file without
654 654 opening a new filelog'''
655 655 return filectx(self._repo, self._repopath, fileid=fileid,
656 656 filelog=self._filelog)
657 657
658 658 def rev(self):
659 659 if '_changectx' in self.__dict__:
660 660 return self._changectx.rev()
661 661 return self._filelog.linkrev(self._filerev)
662 662
663 663 def data(self): return self._repo.wread(self._path)
664 664 def renamed(self):
665 665 rp = self._repopath
666 666 if rp == self._path:
667 667 return None
668 668 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
669 669
670 670 def parents(self):
671 671 '''return parent filectxs, following copies if necessary'''
672 672 p = self._path
673 673 rp = self._repopath
674 674 pcl = self._changectx._parents
675 675 fl = self._filelog
676 676 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
677 677 if len(pcl) > 1:
678 678 if rp != p:
679 679 fl = None
680 680 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
681 681
682 682 return [filectx(self._repo, p, fileid=n, filelog=l)
683 683 for p,n,l in pl if n != nullid]
684 684
685 685 def children(self):
686 686 return []
687 687
688 688 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
689 689 def date(self):
690 690 t, tz = self._changectx.date()
691 691 try:
692 692 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
693 693 except OSError, err:
694 694 if err.errno != errno.ENOENT: raise
695 695 return (t, tz)
696 696
697 697 def cmp(self, text): return self._repo.wread(self._path) == text
698 698
699 699 class memctx(object):
700 700 """Use memctx to perform in-memory commits via localrepo.commitctx().
701 701
702 702 Revision information is supplied at initialization time, while
703 703 related file data is made available through a callback
704 704 mechanism. 'repo' is the current localrepo, 'parents' is a
705 705 sequence of two parent revisions identifiers (pass None for every
706 706 missing parent), 'text' is the commit message and 'files' lists
707 707 names of files touched by the revision (normalized and relative to
708 708 repository root).
709 709
710 710 filectxfn(repo, memctx, path) is a callable receiving the
711 711 repository, the current memctx object and the normalized path of
712 712 requested file, relative to repository root. It is fired by the
713 713 commit function for every file in 'files', but the order of calls is
714 714 undefined. If the file is available in the revision being
715 715 committed (updated or added), filectxfn returns a memfilectx
716 716 object. If the file was removed, filectxfn raises an
717 717 IOError. Moved files are represented by marking the source file
718 718 removed and the new file added with copy information (see
719 719 memfilectx).
720 720
721 721 user receives the committer name and defaults to current
722 722 repository username, date is the commit date in any format
723 723 supported by util.parsedate() and defaults to current date, extra
724 724 is a dictionary of metadata or is left empty.
725 725 """
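A hedged sketch of a filectxfn matching the description above; 'contents' is a hypothetical {path: data} dict, and memfilectx is the class defined at the end of this file:

    # hypothetical helper: build a filectxfn from a {path: data} dict;
    # absent paths are reported as removed by raising IOError, as the
    # docstring above specifies
    def makefilectxfn(contents):
        def filectxfn(repo, memctx, path):
            if path not in contents:
                raise IOError
            return memfilectx(path, contents[path],
                              islink=False, isexec=False, copied=None)
        return filectxfn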
726 726 def __init__(self, repo, parents, text, files, filectxfn, user=None,
727 727 date=None, extra=None):
728 728 self._repo = repo
729 729 self._rev = None
730 730 self._node = None
731 731 self._text = text
732 732 self._date = date and util.parsedate(date) or util.makedate()
733 733 self._user = user
734 734 parents = [(p or nullid) for p in parents]
735 735 p1, p2 = parents
736 736 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
737 737 files = util.sort(util.unique(files))
738 738 self._status = [files, [], [], [], []]
739 739 self._filectxfn = filectxfn
740 740
741 741 self._extra = extra and extra.copy() or {}
742 742 if 'branch' not in self._extra:
743 743 self._extra['branch'] = 'default'
744 744 elif self._extra.get('branch') == '':
745 745 self._extra['branch'] = 'default'
746 746
747 747 def __str__(self):
748 748 return str(self._parents[0]) + "+"
749 749
750 750 def __int__(self):
751 751 return self._rev
752 752
753 753 def __nonzero__(self):
754 754 return True
755 755
756 756 def user(self): return self._user or self._repo.ui.username()
757 757 def date(self): return self._date
758 758 def description(self): return self._text
759 759 def files(self): return self.modified()
760 760 def modified(self): return self._status[0]
761 761 def added(self): return self._status[1]
762 762 def removed(self): return self._status[2]
763 763 def deleted(self): return self._status[3]
764 764 def unknown(self): return self._status[4]
765 765 def clean(self): return self._status[5]
766 766 def branch(self): return self._extra['branch']
767 767 def extra(self): return self._extra
768 768 def flags(self, f): return self[f].flags()
769 769
770 770 def parents(self):
771 771 """return contexts for each parent changeset"""
772 772 return self._parents
773 773
774 774 def filectx(self, path, filelog=None):
775 775 """get a file context from the working directory"""
776 776 return self._filectxfn(self._repo, self, path)
777 777
778 778 class memfilectx(object):
779 779 """memfilectx represents an in-memory file to commit.
780 780
781 781 See memctx for more details.
782 782 """
783 783 def __init__(self, path, data, islink, isexec, copied):
784 784 """
785 785 path is the normalized file path relative to repository root.
786 786 data is the file content as a string.
787 787 islink is True if the file is a symbolic link.
788 788 isexec is True if the file is executable.
789 789 copied is the source file path if current file was copied in the
790 790 revision being committed, or None."""
791 791 self._path = path
792 792 self._data = data
793 793 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
794 794 self._copied = None
795 795 if copied:
796 796 self._copied = (copied, nullid)
797 797
798 798 def __nonzero__(self): return True
799 799 def __str__(self): return "%s@%s" % (self.path(), self._changectx)
800 800 def path(self): return self._path
801 801 def data(self): return self._data
802 802 def flags(self): return self._flags
803 803 def isexec(self): return 'x' in self._flags
804 804 def islink(self): return 'l' in self._flags
805 805 def renamed(self): return self._copied
806 806
@@ -1,641 +1,641 b''
1 1 # This library is free software; you can redistribute it and/or
2 2 # modify it under the terms of the GNU Lesser General Public
3 3 # License as published by the Free Software Foundation; either
4 4 # version 2.1 of the License, or (at your option) any later version.
5 5 #
6 6 # This library is distributed in the hope that it will be useful,
7 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 9 # Lesser General Public License for more details.
10 10 #
11 11 # You should have received a copy of the GNU Lesser General Public
12 12 # License along with this library; if not, write to the
13 13 # Free Software Foundation, Inc.,
14 14 # 59 Temple Place, Suite 330,
15 15 # Boston, MA 02111-1307 USA
16 16
17 17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
18 18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
19 19
20 20 # Modified by Benoit Boissinot:
21 21 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
22 22 # Modified by Dirkjan Ochtman:
23 23 # - import md5 function from a local util module
24 24
25 25 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
26 26
27 27 >>> import urllib2
28 28 >>> from keepalive import HTTPHandler
29 29 >>> keepalive_handler = HTTPHandler()
30 30 >>> opener = urllib2.build_opener(keepalive_handler)
31 31 >>> urllib2.install_opener(opener)
32 32 >>>
33 33 >>> fo = urllib2.urlopen('http://www.python.org')
34 34
35 35 If a connection to a given host is requested, and all of the existing
36 36 connections are still in use, another connection will be opened. If
37 37 the handler tries to use an existing connection but it fails in some
38 38 way, it will be closed and removed from the pool.
39 39
40 40 To remove the handler, simply re-run build_opener with no arguments, and
41 41 install that opener.
42 42
43 43 You can explicitly close connections by using the close_connection()
44 44 method of the returned file-like object (described below) or you can
45 45 use the handler methods:
46 46
47 47 close_connection(host)
48 48 close_all()
49 49 open_connections()
50 50
51 51 NOTE: using the close_connection and close_all methods of the handler
52 52 should be done with care when using multiple threads.
53 53 * there is nothing that prevents another thread from creating new
54 54 connections immediately after connections are closed
55 55 * no checks are done to prevent in-use connections from being closed
56 56
57 57 >>> keepalive_handler.close_all()
58 58
59 59 EXTRA ATTRIBUTES AND METHODS
60 60
61 61 Upon a status of 200, the object returned has a few additional
62 62 attributes and methods, which should not be used if you want to
63 63 remain consistent with the normal urllib2-returned objects:
64 64
65 65 close_connection() - close the connection to the host
66 66 readlines() - you know, readlines()
67 67 status - the return status (i.e. 404)
68 68 reason - English translation of status (i.e. 'File not found')
69 69
70 70 If you want the best of both worlds, use this inside an
71 71 AttributeError-catching try:
72 72
73 73 >>> try: status = fo.status
74 74 ... except AttributeError: status = None
75 75
76 76 Unfortunately, these are ONLY there if status == 200, so it's not
77 77 easy to distinguish between non-200 responses. The reason is that
78 78 urllib2 tries to do clever things with error codes 301, 302, 401,
79 79 and 407, and it wraps the object upon return.
80 80
81 81 For python versions earlier than 2.4, you can avoid this fancy error
82 82 handling by setting the module-level global HANDLE_ERRORS to zero.
83 83 You see, prior to 2.4, it's the HTTP Handler's job to determine what
84 84 to handle specially, and what to just pass up. HANDLE_ERRORS == 0
85 85 means "pass everything up". In python 2.4, however, this job no
86 86 longer belongs to the HTTP Handler and is now done by a NEW handler,
87 87 HTTPErrorProcessor. Here's the bottom line:
88 88
89 89 python version < 2.4
90 90 HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
91 91 errors
92 92 HANDLE_ERRORS == 0 pass everything up, error processing is
93 93 left to the calling code
94 94 python version >= 2.4
95 95 HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
96 96 HANDLE_ERRORS == 0 (default) pass everything up, let the
97 97 other handlers (specifically,
98 98 HTTPErrorProcessor) decide what to do
99 99
100 100 In practice, setting the variable either way makes little difference
101 101 in python 2.4, so for the most consistent behavior across versions,
102 102 you probably just want to use the defaults, which will give you
103 103 exceptions on errors.
104 104
105 105 """
106 106
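Putting the docstring's pieces together, a version-safe way to open a URL through the handler and read the extra status attribute (a sketch; the module is assumed importable as keepalive):

    import urllib2
    from keepalive import HTTPHandler

    urllib2.install_opener(urllib2.build_opener(HTTPHandler()))
    fo = urllib2.urlopen('http://www.python.org')
    try:
        status = fo.status      # only present on keepalive responses
    except AttributeError:
        status = None           # wrapped by urllib2 error handling
    print status
    fo.close()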
107 107 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
108 108
109 109 import urllib2
110 110 import httplib
111 111 import socket
112 112 import thread
113 113
114 114 DEBUG = None
115 115
116 116 import sys
117 117 if sys.version_info < (2, 4): HANDLE_ERRORS = 1
118 118 else: HANDLE_ERRORS = 0
119 119
120 120 class ConnectionManager:
121 121 """
122 122 The connection manager must be able to:
123 123 * keep track of all existing connections
124 124 """
125 125 def __init__(self):
126 126 self._lock = thread.allocate_lock()
127 127 self._hostmap = {} # map hosts to a list of connections
128 128 self._connmap = {} # map connections to host
129 129 self._readymap = {} # map connection to ready state
130 130
131 131 def add(self, host, connection, ready):
132 132 self._lock.acquire()
133 133 try:
134 134 if not host in self._hostmap: self._hostmap[host] = []
135 135 self._hostmap[host].append(connection)
136 136 self._connmap[connection] = host
137 137 self._readymap[connection] = ready
138 138 finally:
139 139 self._lock.release()
140 140
141 141 def remove(self, connection):
142 142 self._lock.acquire()
143 143 try:
144 144 try:
145 145 host = self._connmap[connection]
146 146 except KeyError:
147 147 pass
148 148 else:
149 149 del self._connmap[connection]
150 150 del self._readymap[connection]
151 151 self._hostmap[host].remove(connection)
152 152 if not self._hostmap[host]: del self._hostmap[host]
153 153 finally:
154 154 self._lock.release()
155 155
156 156 def set_ready(self, connection, ready):
157 157 try: self._readymap[connection] = ready
158 158 except KeyError: pass
159 159
160 160 def get_ready_conn(self, host):
161 161 conn = None
162 162 self._lock.acquire()
163 163 try:
164 164 if host in self._hostmap:
165 165 for c in self._hostmap[host]:
166 166 if self._readymap[c]:
167 167 self._readymap[c] = 0
168 168 conn = c
169 169 break
170 170 finally:
171 171 self._lock.release()
172 172 return conn
173 173
174 174 def get_all(self, host=None):
175 175 if host:
176 176 return list(self._hostmap.get(host, []))
177 177 else:
178 178 return dict(self._hostmap)
179 179
180 180 class KeepAliveHandler:
181 181 def __init__(self):
182 182 self._cm = ConnectionManager()
183 183
184 184 #### Connection Management
185 185 def open_connections(self):
186 186 """return a list of connected hosts and the number of connections
187 187 to each. [('foo.com:80', 2), ('bar.org', 1)]"""
188 188 return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
189 189
190 190 def close_connection(self, host):
191 191 """close connection(s) to <host>
192 192 host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
193 193 no error occurs if there is no connection to that host."""
194 194 for h in self._cm.get_all(host):
195 195 self._cm.remove(h)
196 196 h.close()
197 197
198 198 def close_all(self):
199 199 """close all open connections"""
200 200 for host, conns in self._cm.get_all().iteritems():
201 201 for h in conns:
202 202 self._cm.remove(h)
203 203 h.close()
204 204
205 205 def _request_closed(self, request, host, connection):
206 206 """tells us that this request is now closed and the
207 207 connection is ready for another request"""
208 208 self._cm.set_ready(connection, 1)
209 209
210 210 def _remove_connection(self, host, connection, close=0):
211 211 if close: connection.close()
212 212 self._cm.remove(connection)
213 213
214 214 #### Transaction Execution
215 215 def http_open(self, req):
216 216 return self.do_open(HTTPConnection, req)
217 217
218 218 def do_open(self, http_class, req):
219 219 host = req.get_host()
220 220 if not host:
221 221 raise urllib2.URLError('no host given')
222 222
223 223 try:
224 224 h = self._cm.get_ready_conn(host)
225 225 while h:
226 226 r = self._reuse_connection(h, req, host)
227 227
228 228 # if this response is non-None, then it worked and we're
229 229 # done. Break out, skipping the else block.
230 230 if r: break
231 231
232 232 # connection is bad - possibly closed by server
233 233 # discard it and ask for the next free connection
234 234 h.close()
235 235 self._cm.remove(h)
236 236 h = self._cm.get_ready_conn(host)
237 237 else:
238 238 # no (working) free connections were found. Create a new one.
239 239 h = http_class(host)
240 240 if DEBUG: DEBUG.info("creating new connection to %s (%d)",
241 241 host, id(h))
242 242 self._cm.add(host, h, 0)
243 243 self._start_transaction(h, req)
244 244 r = h.getresponse()
245 245 except (socket.error, httplib.HTTPException), err:
246 246 raise urllib2.URLError(err)
247 247
248 248 # if not a persistent connection, don't try to reuse it
249 249 if r.will_close: self._cm.remove(h)
250 250
251 251 if DEBUG: DEBUG.info("STATUS: %s, %s", r.status, r.reason)
252 252 r._handler = self
253 253 r._host = host
254 254 r._url = req.get_full_url()
255 255 r._connection = h
256 256 r.code = r.status
257 257 r.headers = r.msg
258 258 r.msg = r.reason
259 259
260 260 if r.status == 200 or not HANDLE_ERRORS:
261 261 return r
262 262 else:
263 263 return self.parent.error('http', req, r,
264 264 r.status, r.msg, r.headers)
265 265
266 266 def _reuse_connection(self, h, req, host):
267 267 """start the transaction with a re-used connection
268 268 return a response object (r) upon success or None on failure.
269 269 This DOES not close or remove bad connections in cases where
270 270 it returns. However, if an unexpected exception occurs, it
271 271 will close and remove the connection before re-raising.
272 272 """
273 273 try:
274 274 self._start_transaction(h, req)
275 275 r = h.getresponse()
276 276 # note: just because we got something back doesn't mean it
277 277 # worked. We'll check the version below, too.
278 278 except (socket.error, httplib.HTTPException):
279 279 r = None
280 280 except:
281 281 # adding this block just in case we've missed
282 282 # something. We will still raise the exception, but
283 283 # let's try to close the connection and remove it
284 284 # first. We previously got into a nasty loop
285 285 # where an exception was uncaught, and so the
286 286 # connection stayed open. On the next try, the
287 287 # same exception was raised, etc. The tradeoff is
288 288 # that it's now possible this call will raise
289 289 # a DIFFERENT exception
290 290 if DEBUG: DEBUG.error("unexpected exception - closing " + \
291 291 "connection to %s (%d)", host, id(h))
292 292 self._cm.remove(h)
293 293 h.close()
294 294 raise
295 295
296 296 if r is None or r.version == 9:
297 297 # httplib falls back to assuming HTTP 0.9 if it gets a
298 298 # bad header back. This is most likely to happen if
299 299 # the socket has been closed by the server since we
300 300 # last used the connection.
301 301 if DEBUG: DEBUG.info("failed to re-use connection to %s (%d)",
302 302 host, id(h))
303 303 r = None
304 304 else:
305 305 if DEBUG: DEBUG.info("re-using connection to %s (%d)", host, id(h))
306 306
307 307 return r
308 308
309 309 def _start_transaction(self, h, req):
310 310 headers = req.headers.copy()
311 311 body = req.data
312 312 if sys.version_info >= (2, 4):
313 313 headers.update(req.unredirected_hdrs)
314 314 try:
315 315 h.request(req.get_method(), req.get_selector(), body, headers)
316 316 except socket.error, err: # XXX what error?
317 317 raise urllib2.URLError(err)
318 318
319 319 class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
320 320 pass
321 321
322 322 class HTTPResponse(httplib.HTTPResponse):
323 323 # we need to subclass HTTPResponse in order to
324 324 # 1) add readline() and readlines() methods
325 325 # 2) add close_connection() methods
326 326 # 3) add info() and geturl() methods
327 327
328 328 # in order to add readline(), read must be modified to deal with a
329 329 # buffer. example: readline must read a buffer and then spit back
330 330 # one line at a time. The only real alternative is to read one
331 331 # BYTE at a time (ick). Once something has been read, it can't be
332 332 # put back (ok, maybe it can, but that's even uglier than this),
333 333 # so if you THEN do a normal read, you must first take stuff from
334 334 # the buffer.
335 335
336 336 # the read method wraps the original to accommodate buffering,
337 337 # although read() never adds to the buffer.
338 338 # Both readline and readlines have been stolen with almost no
339 339 # modification from socket.py
340 340
341 341
342 342 def __init__(self, sock, debuglevel=0, strict=0, method=None):
343 343 if method: # the httplib in python 2.3 uses the method arg
344 344 httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
345 345 else: # 2.2 doesn't
346 346 httplib.HTTPResponse.__init__(self, sock, debuglevel)
347 347 self.fileno = sock.fileno
348 348 self.code = None
349 349 self._rbuf = ''
350 350 self._rbufsize = 8096
351 351 self._handler = None # inserted by the handler later
352 352 self._host = None # (same)
353 353 self._url = None # (same)
354 354 self._connection = None # (same)
355 355
356 356 _raw_read = httplib.HTTPResponse.read
357 357
358 358 def close(self):
359 359 if self.fp:
360 360 self.fp.close()
361 361 self.fp = None
362 362 if self._handler:
363 363 self._handler._request_closed(self, self._host,
364 364 self._connection)
365 365
366 366 def close_connection(self):
367 367 self._handler._remove_connection(self._host, self._connection, close=1)
368 368 self.close()
369 369
370 370 def info(self):
371 371 return self.headers
372 372
373 373 def geturl(self):
374 374 return self._url
375 375
376 376 def read(self, amt=None):
377 377 # the _rbuf test is only in this first if for speed. It's not
378 378 # logically necessary
379 379 if self._rbuf and amt is not None:
380 380 L = len(self._rbuf)
381 381 if amt > L:
382 382 amt -= L
383 383 else:
384 384 s = self._rbuf[:amt]
385 385 self._rbuf = self._rbuf[amt:]
386 386 return s
387 387
388 388 s = self._rbuf + self._raw_read(amt)
389 389 self._rbuf = ''
390 390 return s
391 391
392 392 # stolen from Python SVN #68532 to fix issue1088
393 393 def _read_chunked(self, amt):
394 394 chunk_left = self.chunk_left
395 395 value = ''
396 396
397 397 # XXX This accumulates chunks by repeated string concatenation,
398 398 # which is not efficient as the number or size of chunks gets big.
399 399 while True:
400 400 if chunk_left is None:
401 401 line = self.fp.readline()
402 402 i = line.find(';')
403 403 if i >= 0:
404 404 line = line[:i] # strip chunk-extensions
405 405 try:
406 406 chunk_left = int(line, 16)
407 407 except ValueError:
408 408 # close the connection as protocol synchronisation is
409 409 # probably lost
410 410 self.close()
411 411 raise httplib.IncompleteRead(value)
412 412 if chunk_left == 0:
413 413 break
414 414 if amt is None:
415 415 value += self._safe_read(chunk_left)
416 416 elif amt < chunk_left:
417 417 value += self._safe_read(amt)
418 418 self.chunk_left = chunk_left - amt
419 419 return value
420 420 elif amt == chunk_left:
421 421 value += self._safe_read(amt)
422 422 self._safe_read(2) # toss the CRLF at the end of the chunk
423 423 self.chunk_left = None
424 424 return value
425 425 else:
426 426 value += self._safe_read(chunk_left)
427 427 amt -= chunk_left
428 428
429 429 # we read the whole chunk, get another
430 430 self._safe_read(2) # toss the CRLF at the end of the chunk
431 431 chunk_left = None
432 432
433 433 # read and discard trailer up to the CRLF terminator
434 434 ### note: we shouldn't have any trailers!
435 435 while True:
436 436 line = self.fp.readline()
437 437 if not line:
438 438 # a vanishingly small number of sites EOF without
439 439 # sending the trailer
440 440 break
441 441 if line == '\r\n':
442 442 break
443 443
444 444 # we read everything; close the "file"
445 445 self.close()
446 446
447 447 return value
448 448
449 449 def readline(self, limit=-1):
450 450 data = ""
451 451 i = self._rbuf.find('\n')
452 452 while i < 0 and not (0 < limit <= len(self._rbuf)):
453 453 new = self._raw_read(self._rbufsize)
454 454 if not new: break
455 455 i = new.find('\n')
456 456 if i >= 0: i = i + len(self._rbuf)
457 457 self._rbuf = self._rbuf + new
458 458 if i < 0: i = len(self._rbuf)
459 459 else: i = i+1
460 460 if 0 <= limit < len(self._rbuf): i = limit
461 461 data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
462 462 return data
463 463
464 464 def readlines(self, sizehint = 0):
465 465 total = 0
466 466 list = []
467 467 while 1:
468 468 line = self.readline()
469 469 if not line: break
470 470 list.append(line)
471 471 total += len(line)
472 472 if sizehint and total >= sizehint:
473 473 break
474 474 return list
475 475
476 476
477 477 class HTTPConnection(httplib.HTTPConnection):
478 478 # use the modified response class
479 479 response_class = HTTPResponse
480 480
481 481 #########################################################################
482 482 ##### TEST FUNCTIONS
483 483 #########################################################################
484 484
485 485 def error_handler(url):
486 486 global HANDLE_ERRORS
487 487 orig = HANDLE_ERRORS
488 488 keepalive_handler = HTTPHandler()
489 489 opener = urllib2.build_opener(keepalive_handler)
490 490 urllib2.install_opener(opener)
491 491 pos = {0: 'off', 1: 'on'}
492 492 for i in (0, 1):
493 493 print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
494 494 HANDLE_ERRORS = i
495 495 try:
496 496 fo = urllib2.urlopen(url)
497 foo = fo.read()
497 fo.read()
498 498 fo.close()
499 499 try: status, reason = fo.status, fo.reason
500 500 except AttributeError: status, reason = None, None
501 501 except IOError, e:
502 502 print " EXCEPTION: %s" % e
503 503 raise
504 504 else:
505 505 print " status = %s, reason = %s" % (status, reason)
506 506 HANDLE_ERRORS = orig
507 507 hosts = keepalive_handler.open_connections()
508 508 print "open connections:", hosts
509 509 keepalive_handler.close_all()
510 510
511 511 def continuity(url):
512 512 from util import md5
513 513 format = '%25s: %s'
514 514
515 515 # first fetch the file with the normal http handler
516 516 opener = urllib2.build_opener()
517 517 urllib2.install_opener(opener)
518 518 fo = urllib2.urlopen(url)
519 519 foo = fo.read()
520 520 fo.close()
521 521 m = md5.new(foo)
522 522 print format % ('normal urllib', m.hexdigest())
523 523
524 524 # now install the keepalive handler and try again
525 525 opener = urllib2.build_opener(HTTPHandler())
526 526 urllib2.install_opener(opener)
527 527
528 528 fo = urllib2.urlopen(url)
529 529 foo = fo.read()
530 530 fo.close()
531 531 m = md5.new(foo)
532 532 print format % ('keepalive read', m.hexdigest())
533 533
534 534 fo = urllib2.urlopen(url)
535 535 foo = ''
536 536 while 1:
537 537 f = fo.readline()
538 538 if f: foo = foo + f
539 539 else: break
540 540 fo.close()
541 541 m = md5.new(foo)
542 542 print format % ('keepalive readline', m.hexdigest())
543 543
544 544 def comp(N, url):
545 545 print ' making %i connections to:\n %s' % (N, url)
546 546
547 547 sys.stdout.write(' first using the normal urllib handlers')
548 548 # first use normal opener
549 549 opener = urllib2.build_opener()
550 550 urllib2.install_opener(opener)
551 551 t1 = fetch(N, url)
552 552 print ' TIME: %.3f s' % t1
553 553
554 554 sys.stdout.write(' now using the keepalive handler ')
555 555 # now install the keepalive handler and try again
556 556 opener = urllib2.build_opener(HTTPHandler())
557 557 urllib2.install_opener(opener)
558 558 t2 = fetch(N, url)
559 559 print ' TIME: %.3f s' % t2
560 560 print ' improvement factor: %.2f' % (t1/t2, )
561 561
562 562 def fetch(N, url, delay=0):
563 563 import time
564 564 lens = []
565 565 starttime = time.time()
566 566 for i in range(N):
567 567 if delay and i > 0: time.sleep(delay)
568 568 fo = urllib2.urlopen(url)
569 569 foo = fo.read()
570 570 fo.close()
571 571 lens.append(len(foo))
572 572 diff = time.time() - starttime
573 573
574 574 j = 0
575 575 for i in lens[1:]:
576 576 j = j + 1
577 577 if not i == lens[0]:
578 578 print "WARNING: inconsistent length on read %i: %i" % (j, i)
579 579
580 580 return diff
581 581
582 582 def test_timeout(url):
583 583 global DEBUG
584 584 dbbackup = DEBUG
585 585 class FakeLogger:
586 586 def debug(self, msg, *args): print msg % args
587 587 info = warning = error = debug
588 588 DEBUG = FakeLogger()
589 589 print " fetching the file to establish a connection"
590 590 fo = urllib2.urlopen(url)
591 591 data1 = fo.read()
592 592 fo.close()
593 593
594 594 i = 20
595 595 print " waiting %i seconds for the server to close the connection" % i
596 596 while i > 0:
597 597 sys.stdout.write('\r %2i' % i)
598 598 sys.stdout.flush()
599 599 time.sleep(1)
600 600 i -= 1
601 601 sys.stderr.write('\r')
602 602
603 603 print " fetching the file a second time"
604 604 fo = urllib2.urlopen(url)
605 605 data2 = fo.read()
606 606 fo.close()
607 607
608 608 if data1 == data2:
609 609 print ' data are identical'
610 610 else:
611 611 print ' ERROR: DATA DIFFER'
612 612
613 613 DEBUG = dbbackup
614 614
615 615
616 616 def test(url, N=10):
617 617 print "checking error handler (do this on a non-200)"
618 618 try: error_handler(url)
619 619 except IOError, e:
620 620 print "exiting - exception will prevent further tests"
621 621 sys.exit()
622 622 print
623 623 print "performing continuity test (making sure stuff isn't corrupted)"
624 624 continuity(url)
625 625 print
626 626 print "performing speed comparison"
627 627 comp(N, url)
628 628 print
629 629 print "performing dropped-connection check"
630 630 test_timeout(url)
631 631
632 632 if __name__ == '__main__':
633 633 import time
634 634 import sys
635 635 try:
636 636 N = int(sys.argv[1])
637 637 url = sys.argv[2]
638 638 except:
639 639 print "%s <integer> <url>" % sys.argv[0]
640 640 else:
641 641 test(url, N)
@@ -1,1361 +1,1361 b''
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 # import stuff from node for others to import from revlog
14 14 from node import bin, hex, nullid, nullrev, short #@UnusedImport
15 15 from i18n import _
16 16 import changegroup, errno, ancestor, mdiff, parsers
17 17 import struct, util, zlib, error
18 18
19 19 _pack = struct.pack
20 20 _unpack = struct.unpack
21 21 _compress = zlib.compress
22 22 _decompress = zlib.decompress
23 23 _sha = util.sha1
24 24
25 25 # revlog flags
26 26 REVLOGV0 = 0
27 27 REVLOGNG = 1
28 28 REVLOGNGINLINEDATA = (1 << 16)
29 29 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
30 30 REVLOG_DEFAULT_FORMAT = REVLOGNG
31 31 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
32 32
33 33 RevlogError = error.RevlogError
34 34 LookupError = error.LookupError
35 35
36 36 def getoffset(q):
37 37 return int(q >> 16)
38 38
39 39 def gettype(q):
40 40 return int(q & 0xFFFF)
41 41
42 42 def offset_type(offset, type):
43 43 return long(long(offset) << 16 | type)
44 44
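These three helpers pack a 48-bit file offset and 16 bits of type flags into the single first field of an index entry, for example:

    q = offset_type(1024, 1)        # offset 1024, type flag 1
    assert getoffset(q) == 1024
    assert gettype(q) == 1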
45 45 def hash(text, p1, p2):
46 46 """generate a hash from the given text and its parent hashes
47 47
48 48 This hash combines both the current file contents and its history
49 49 in a manner that makes it easy to distinguish nodes with the same
50 50 content in the revision graph.
51 51 """
52 52 l = [p1, p2]
53 53 l.sort()
54 54 s = _sha(l[0])
55 55 s.update(l[1])
56 56 s.update(text)
57 57 return s.digest()
58 58
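Because the parent hashes are sorted before mixing, the result is independent of parent order. A quick equivalent sketch, with hashlib standing in for util.sha1 and 20-byte binary nodeids as used throughout this file:

    import hashlib

    def nodehash(text, p1, p2):
        a, b = sorted([p1, p2])     # parent order does not matter
        s = hashlib.sha1(a)
        s.update(b)
        s.update(text)
        return s.digest()

    assert nodehash('x', '\0' * 20, '\1' * 20) == \
           nodehash('x', '\1' * 20, '\0' * 20)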
59 59 def compress(text):
60 60 """ generate a possibly-compressed representation of text """
61 61 if not text:
62 62 return ("", text)
63 63 l = len(text)
64 64 bin = None
65 65 if l < 44:
66 66 pass
67 67 elif l > 1000000:
68 68 # zlib makes an internal copy, thus doubling memory usage for
69 69 # large files, so lets do this in pieces
70 70 z = zlib.compressobj()
71 71 p = []
72 72 pos = 0
73 73 while pos < l:
74 74 pos2 = pos + 2**20
75 75 p.append(z.compress(text[pos:pos2]))
76 76 pos = pos2
77 77 p.append(z.flush())
78 78 if sum(map(len, p)) < l:
79 79 bin = "".join(p)
80 80 else:
81 81 bin = _compress(text)
82 82 if bin is None or len(bin) > l:
83 83 if text[0] == '\0':
84 84 return ("", text)
85 85 return ('u', text)
86 86 return ("", bin)
87 87
88 88 def decompress(bin):
89 89 """ decompress the given input """
90 90 if not bin:
91 91 return bin
92 92 t = bin[0]
93 93 if t == '\0':
94 94 return bin
95 95 if t == 'x':
96 96 return _decompress(bin)
97 97 if t == 'u':
98 98 return bin[1:]
99 99 raise RevlogError(_("unknown compression type %r") % t)
100 100
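Joined back together, header plus payload round-trips through decompress(), which is how chunks are read back; a quick sketch covering all three storage forms:

    # round trip: a stored chunk is header + payload
    for text in ('short text', 'x' * 100000, '\0leading nul'):
        h, d = compress(text)
        assert decompress(h + d) == text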
101 101 class lazyparser(object):
102 102 """
103 103 this class avoids the need to parse the entirety of large indices
104 104 """
105 105
106 106 # lazyparser is not safe to use on windows if win32 extensions not
107 107 # available. it keeps file handle open, which make it not possible
108 108 # to break hardlinks on local cloned repos.
109 109
110 110 def __init__(self, dataf, size):
111 111 self.dataf = dataf
112 112 self.s = struct.calcsize(indexformatng)
113 113 self.datasize = size
114 114 self.l = size/self.s
115 115 self.index = [None] * self.l
116 116 self.map = {nullid: nullrev}
117 117 self.allmap = 0
118 118 self.all = 0
119 119 self.mapfind_count = 0
120 120
121 121 def loadmap(self):
122 122 """
123 123 during a commit, we need to make sure the rev being added is
124 124 not a duplicate. This requires loading the entire index,
125 125 which is fairly slow. loadmap can load up just the node map,
126 126 which takes much less time.
127 127 """
128 128 if self.allmap:
129 129 return
130 130 end = self.datasize
131 131 self.allmap = 1
132 132 cur = 0
133 133 count = 0
134 134 blocksize = self.s * 256
135 135 self.dataf.seek(0)
136 136 while cur < end:
137 137 data = self.dataf.read(blocksize)
138 138 off = 0
139 139 for x in xrange(256):
140 140 n = data[off + ngshaoffset:off + ngshaoffset + 20]
141 141 self.map[n] = count
142 142 count += 1
143 143 if count >= self.l:
144 144 break
145 145 off += self.s
146 146 cur += blocksize
147 147
148 148 def loadblock(self, blockstart, blocksize, data=None):
149 149 if self.all:
150 150 return
151 151 if data is None:
152 152 self.dataf.seek(blockstart)
153 153 if blockstart + blocksize > self.datasize:
154 154 # the revlog may have grown since we've started running,
155 155 # but we don't have space in self.index for more entries.
156 156 # limit blocksize so that we don't get too much data.
157 157 blocksize = max(self.datasize - blockstart, 0)
158 158 data = self.dataf.read(blocksize)
159 159 lend = len(data) / self.s
160 160 i = blockstart / self.s
161 161 off = 0
162 162 # lazyindex supports __delitem__
163 163 if lend > len(self.index) - i:
164 164 lend = len(self.index) - i
165 165 for x in xrange(lend):
166 166 if self.index[i + x] is None:
167 167 b = data[off : off + self.s]
168 168 self.index[i + x] = b
169 169 n = b[ngshaoffset:ngshaoffset + 20]
170 170 self.map[n] = i + x
171 171 off += self.s
172 172
173 173 def findnode(self, node):
174 174 """search backwards through the index file for a specific node"""
175 175 if self.allmap:
176 176 return None
177 177
178 178 # hg log will cause many many searches for the manifest
179 179 # nodes. After we get called a few times, just load the whole
180 180 # thing.
181 181 if self.mapfind_count > 8:
182 182 self.loadmap()
183 183 if node in self.map:
184 184 return node
185 185 return None
186 186 self.mapfind_count += 1
187 187 last = self.l - 1
188 188 while self.index[last] is not None:
189 189 if last == 0:
190 190 self.all = 1
191 191 self.allmap = 1
192 192 return None
193 193 last -= 1
194 194 end = (last + 1) * self.s
195 195 blocksize = self.s * 256
196 196 while end >= 0:
197 197 start = max(end - blocksize, 0)
198 198 self.dataf.seek(start)
199 199 data = self.dataf.read(end - start)
200 200 findend = end - start
201 201 while True:
202 202 # we're searching backwards, so we have to make sure
203 203 # we don't find a changeset where this node is a parent
204 204 off = data.find(node, 0, findend)
205 205 findend = off
206 206 if off >= 0:
207 207 i = off / self.s
208 208 off = i * self.s
209 209 n = data[off + ngshaoffset:off + ngshaoffset + 20]
210 210 if n == node:
211 211 self.map[n] = i + start / self.s
212 212 return node
213 213 else:
214 214 break
215 215 end -= blocksize
216 216 return None
217 217
218 218 def loadindex(self, i=None, end=None):
219 219 if self.all:
220 220 return
221 221 all = False
222 222 if i is None:
223 223 blockstart = 0
224 224 blocksize = (65536 / self.s) * self.s
225 225 end = self.datasize
226 226 all = True
227 227 else:
228 228 if end:
229 229 blockstart = i * self.s
230 230 end = end * self.s
231 231 blocksize = end - blockstart
232 232 else:
233 233 blockstart = (i & ~1023) * self.s
234 234 blocksize = self.s * 1024
235 235 end = blockstart + blocksize
236 236 while blockstart < end:
237 237 self.loadblock(blockstart, blocksize)
238 238 blockstart += blocksize
239 239 if all:
240 240 self.all = True
241 241
242 242 class lazyindex(object):
243 243 """a lazy version of the index array"""
244 244 def __init__(self, parser):
245 245 self.p = parser
246 246 def __len__(self):
247 247 return len(self.p.index)
248 248 def load(self, pos):
249 249 if pos < 0:
250 250 pos += len(self.p.index)
251 251 self.p.loadindex(pos)
252 252 return self.p.index[pos]
253 253 def __getitem__(self, pos):
254 254 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
255 255 def __setitem__(self, pos, item):
256 256 self.p.index[pos] = _pack(indexformatng, *item)
257 257 def __delitem__(self, pos):
258 258 del self.p.index[pos]
259 259 def insert(self, pos, e):
260 260 self.p.index.insert(pos, _pack(indexformatng, *e))
261 261 def append(self, e):
262 262 self.p.index.append(_pack(indexformatng, *e))
263 263
264 264 class lazymap(object):
265 265 """a lazy version of the node map"""
266 266 def __init__(self, parser):
267 267 self.p = parser
268 268 def load(self, key):
269 269 n = self.p.findnode(key)
270 270 if n is None:
271 271 raise KeyError(key)
272 272 def __contains__(self, key):
273 273 if key in self.p.map:
274 274 return True
275 275 self.p.loadmap()
276 276 return key in self.p.map
277 277 def __iter__(self):
278 278 yield nullid
279 279 for i in xrange(self.p.l):
280 280 ret = self.p.index[i]
281 281 if not ret:
282 282 self.p.loadindex(i)
283 283 ret = self.p.index[i]
284 284 if isinstance(ret, str):
285 285 ret = _unpack(indexformatng, ret)
286 286 yield ret[7]
287 287 def __getitem__(self, key):
288 288 try:
289 289 return self.p.map[key]
290 290 except KeyError:
291 291 try:
292 292 self.load(key)
293 293 return self.p.map[key]
294 294 except KeyError:
295 295 raise KeyError("node " + hex(key))
296 296 def __setitem__(self, key, val):
297 297 self.p.map[key] = val
298 298 def __delitem__(self, key):
299 299 del self.p.map[key]
300 300
301 301 indexformatv0 = ">4l20s20s20s"
302 302 v0shaoffset = 56
303 303
304 304 class revlogoldio(object):
305 305 def __init__(self):
306 306 self.size = struct.calcsize(indexformatv0)
307 307
308 308 def parseindex(self, fp, inline):
309 309 s = self.size
310 310 index = []
311 311 nodemap = {nullid: nullrev}
312 312 n = off = 0
313 313 data = fp.read()
314 314 l = len(data)
315 315 while off + s <= l:
316 316 cur = data[off:off + s]
317 317 off += s
318 318 e = _unpack(indexformatv0, cur)
319 319 # transform to revlogv1 format
320 320 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
321 321 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
322 322 index.append(e2)
323 323 nodemap[e[6]] = n
324 324 n += 1
325 325
326 326 return index, nodemap, None
327 327
328 328 def packentry(self, entry, node, version, rev):
329 329 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
330 330 node(entry[5]), node(entry[6]), entry[7])
331 331 return _pack(indexformatv0, *e2)
332 332
333 333 # index ng:
334 334 # 6 bytes offset
335 335 # 2 bytes flags
336 336 # 4 bytes compressed length
337 337 # 4 bytes uncompressed length
338 338 # 4 bytes: base rev
339 339 # 4 bytes link rev
340 340 # 4 bytes parent 1 rev
341 341 # 4 bytes parent 2 rev
342 342 # 32 bytes: nodeid
343 343 indexformatng = ">Qiiiiii20s12x"
344 344 ngshaoffset = 32
345 345 versionformat = ">I"
346 346
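The byte counts in the comment above add up to a fixed 64-byte record: 8 bytes for the packed offset+flags, six 4-byte integers, and a 32-byte node field of which 20 bytes hold the sha and 12 are padding:

    # sanity check of the record layout described above:
    # 8 (offset+flags) + 6*4 (ints) + 20 (sha) + 12 (pad) = 64 bytes
    assert struct.calcsize(indexformatng) == 64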
347 347 class revlogio(object):
348 348 def __init__(self):
349 349 self.size = struct.calcsize(indexformatng)
350 350
351 351 def parseindex(self, fp, inline):
352 352 try:
353 353 size = util.fstat(fp).st_size
354 354 except AttributeError:
355 355 size = 0
356 356
357 357 if util.openhardlinks() and not inline and size > 1000000:
358 358 # big index, let's parse it on demand
359 359 parser = lazyparser(fp, size)
360 360 index = lazyindex(parser)
361 361 nodemap = lazymap(parser)
362 362 e = list(index[0])
363 363 type = gettype(e[0])
364 364 e[0] = offset_type(0, type)
365 365 index[0] = e
366 366 return index, nodemap, None
367 367
368 368 data = fp.read()
369 369 # call the C implementation to parse the index data
370 370 index, nodemap, cache = parsers.parse_index(data, inline)
371 371 return index, nodemap, cache
372 372
373 373 def packentry(self, entry, node, version, rev):
374 374 p = _pack(indexformatng, *entry)
375 375 if rev == 0:
376 376 p = _pack(versionformat, version) + p[4:]
377 377 return p
378 378
379 379 class revlog(object):
380 380 """
381 381 the underlying revision storage object
382 382
383 383 A revlog consists of two parts, an index and the revision data.
384 384
385 385 The index is a file with a fixed record size containing
386 386 information on each revision, including its nodeid (hash), the
387 387 nodeids of its parents, the position and offset of its data within
388 388 the data file, and the revision it's based on. Finally, each entry
389 389 contains a linkrev entry that can serve as a pointer to external
390 390 data.
391 391
392 392 The revision data itself is a linear collection of data chunks.
393 393 Each chunk represents a revision and is usually represented as a
394 394 delta against the previous chunk. To bound lookup time, runs of
395 395 deltas are limited to about 2 times the length of the original
396 396 version data. This makes retrieval of a version proportional to
397 397 its size, or O(1) relative to the number of revisions.
398 398
399 399 Both pieces of the revlog are written to in an append-only
400 400 fashion, which means we never need to rewrite a file to insert or
401 401 remove data, and can use some simple techniques to avoid the need
402 402 for locking while reading.
403 403 """
404 404 def __init__(self, opener, indexfile):
405 405 """
406 406 create a revlog object
407 407
408 408 opener is a function that abstracts the file opening operation
409 409 and can be used to implement COW semantics or the like.
410 410 """
411 411 self.indexfile = indexfile
412 412 self.datafile = indexfile[:-2] + ".d"
413 413 self.opener = opener
414 414 self._cache = None
415 415 self._chunkcache = None
416 416 self.nodemap = {nullid: nullrev}
417 417 self.index = []
418 418
419 419 v = REVLOG_DEFAULT_VERSION
420 420 if hasattr(opener, "defversion"):
421 421 v = opener.defversion
422 422 if v & REVLOGNG:
423 423 v |= REVLOGNGINLINEDATA
424 424
425 425 i = ""
426 426 try:
427 427 f = self.opener(self.indexfile)
428 428 i = f.read(4)
429 429 f.seek(0)
430 430 if len(i) > 0:
431 431 v = struct.unpack(versionformat, i)[0]
432 432 except IOError, inst:
433 433 if inst.errno != errno.ENOENT:
434 434 raise
435 435
436 436 self.version = v
437 437 self._inline = v & REVLOGNGINLINEDATA
438 438 flags = v & ~0xFFFF
439 439 fmt = v & 0xFFFF
440 440 if fmt == REVLOGV0 and flags:
441 441 raise RevlogError(_("index %s unknown flags %#04x for format v0")
442 442 % (self.indexfile, flags >> 16))
443 443 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
444 444 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
445 445 % (self.indexfile, flags >> 16))
446 446 elif fmt > REVLOGNG:
447 447 raise RevlogError(_("index %s unknown format %d")
448 448 % (self.indexfile, fmt))
449 449
450 450 self._io = revlogio()
451 451 if self.version == REVLOGV0:
452 452 self._io = revlogoldio()
453 453 if i:
454 454 d = self._io.parseindex(f, self._inline)
455 455 self.index, self.nodemap, self._chunkcache = d
456 456
457 457 # add the magic null revision at -1 (if it hasn't been done already)
458 458 if (self.index == [] or isinstance(self.index, lazyindex) or
459 459 self.index[-1][7] != nullid):
460 460 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
461 461
462 462 def _loadindex(self, start, end):
463 463 """load a block of indexes all at once from the lazy parser"""
464 464 if isinstance(self.index, lazyindex):
465 465 self.index.p.loadindex(start, end)
466 466
467 467 def _loadindexmap(self):
468 468 """loads both the map and the index from the lazy parser"""
469 469 if isinstance(self.index, lazyindex):
470 470 p = self.index.p
471 471 p.loadindex()
472 472 self.nodemap = p.map
473 473
474 474 def _loadmap(self):
475 475 """loads the map from the lazy parser"""
476 476 if isinstance(self.nodemap, lazymap):
477 477 self.nodemap.p.loadmap()
478 478 self.nodemap = self.nodemap.p.map
479 479
480 480 def tip(self):
481 481 return self.node(len(self.index) - 2)
482 482 def __len__(self):
483 483 return len(self.index) - 1
484 484 def __iter__(self):
485 485 for i in xrange(len(self)):
486 486 yield i
487 487 def rev(self, node):
488 488 try:
489 489 return self.nodemap[node]
490 490 except KeyError:
491 491 raise LookupError(node, self.indexfile, _('no node'))
492 492 def node(self, rev):
493 493 return self.index[rev][7]
494 494 def linkrev(self, rev):
495 495 return self.index[rev][4]
496 496 def parents(self, node):
497 497 i = self.index
498 498 d = i[self.rev(node)]
499 499 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
500 500 def parentrevs(self, rev):
501 501 return self.index[rev][5:7]
502 502 def start(self, rev):
503 503 return int(self.index[rev][0] >> 16)
504 504 def end(self, rev):
505 505 return self.start(rev) + self.length(rev)
506 506 def length(self, rev):
507 507 return self.index[rev][1]
508 508 def base(self, rev):
509 509 return self.index[rev][3]
510 510
511 511 def size(self, rev):
512 512 """return the length of the uncompressed text for a given revision"""
513 513 l = self.index[rev][2]
514 514 if l >= 0:
515 515 return l
516 516
517 517 t = self.revision(self.node(rev))
518 518 return len(t)
519 519
520 520 # alternate implementation. The advantage of this code is that it
521 521 # will be faster for a single revision. But the results are not
522 522 # cached, so finding the size of every revision will be slower.
523 523 """
524 524 if self.cache and self.cache[1] == rev:
525 525 return len(self.cache[2])
526 526
527 527 base = self.base(rev)
528 528 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
529 529 base = self.cache[1]
530 530 text = self.cache[2]
531 531 else:
532 532 text = self.revision(self.node(base))
533 533
534 534 l = len(text)
535 535 for x in xrange(base + 1, rev + 1):
536 536 l = mdiff.patchedsize(l, self.chunk(x))
537 537 return l
538 538 """
539 539
540 540 def reachable(self, node, stop=None):
541 541 """return a hash of all nodes ancestral to a given node, including
542 542 the node itself, stopping when stop is matched"""
543 543 reachable = {}
544 544 visit = [node]
545 545 reachable[node] = 1
546 546 if stop:
547 547 stopn = self.rev(stop)
548 548 else:
549 549 stopn = 0
550 550 while visit:
551 551 n = visit.pop(0)
552 552 if n == stop:
553 553 continue
554 554 if n == nullid:
555 555 continue
556 556 for p in self.parents(n):
557 557 if self.rev(p) < stopn:
558 558 continue
559 559 if p not in reachable:
560 560 reachable[p] = 1
561 561 visit.append(p)
562 562 return reachable
563 563
564 564 def ancestors(self, *revs):
565 565 'Generate the ancestors of revs using a breadth-first visit'
566 566 visit = list(revs)
567 567 seen = util.set([nullrev])
568 568 while visit:
569 569 for parent in self.parentrevs(visit.pop(0)):
570 570 if parent not in seen:
571 571 visit.append(parent)
572 572 seen.add(parent)
573 573 yield parent
574 574
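For intuition, here is the same breadth-first walk restated over a small hand-built DAG; all data below is invented for the example, with nullrev as -1:

    nullrev = -1

    # rev -> (p1, p2); rev 3 is a merge of 1 and 2
    toyparents = {0: (nullrev, nullrev), 1: (0, nullrev),
                  2: (0, nullrev), 3: (1, 2), 4: (3, nullrev)}

    def toyancestors(*revs):
        visit = list(revs)
        seen = set([nullrev])
        while visit:
            for parent in toyparents[visit.pop(0)]:
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

    # ancestors come out nearest-first and never include the revs themselves
    assert sorted(toyancestors(4)) == [0, 1, 2, 3]
    assert list(toyancestors(1)) == [0]
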
575 575 def descendants(self, *revs):
576 576 'Generate the descendants of revs in topological order'
577 577 seen = util.set(revs)
578 578 for i in xrange(min(revs) + 1, len(self)):
579 579 for x in self.parentrevs(i):
580 580 if x != nullrev and x in seen:
581 581 seen.add(i)
582 582 yield i
583 583 break
584 584
585 585 def findmissing(self, common=None, heads=None):
586 586 '''
587 587 returns the topologically sorted list of nodes from the set:
588 588 missing = (ancestors(heads) \ ancestors(common))
589 589
590 590 where ancestors() is the set of ancestors from heads, heads included
591 591
592 592 if heads is None, the heads of the revlog are used
593 593 if common is None, nullid is assumed to be a common node
594 594 '''
595 595 if common is None:
596 596 common = [nullid]
597 597 if heads is None:
598 598 heads = self.heads()
599 599
600 600 common = [self.rev(n) for n in common]
601 601 heads = [self.rev(n) for n in heads]
602 602
603 603 # we want the ancestors, but inclusive
604 604 has = dict.fromkeys(self.ancestors(*common))
605 605 has[nullrev] = None
606 606 for r in common:
607 607 has[r] = None
608 608
609 609 # take all ancestors from heads that aren't in has
610 610 missing = {}
611 611 visit = [r for r in heads if r not in has]
612 612 while visit:
613 613 r = visit.pop(0)
614 614 if r in missing:
615 615 continue
616 616 else:
617 617 missing[r] = None
618 618 for p in self.parentrevs(r):
619 619 if p not in has:
620 620 visit.append(p)
621 621 missing = missing.keys()
622 622 missing.sort()
623 623 return [self.node(r) for r in missing]
624 624
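The set expression in the docstring checks out directly on a toy graph (invented data; inclusive_ancestors plays the role of the has dictionary built above):

    # rev -> parent revs; rev 3 merges 1 and 2, rev 4 sits on top
    toyparents = {0: (), 1: (0,), 2: (0,), 3: (1, 2), 4: (3,)}

    def inclusive_ancestors(revs):
        out, stack = set(), list(revs)
        while stack:
            r = stack.pop()
            if r not in out:
                out.add(r)
                stack.extend(toyparents[r])
        return out

    # missing = ancestors(heads) \ ancestors(common), both inclusive
    missing = sorted(inclusive_ancestors([4]) - inclusive_ancestors([1]))
    assert missing == [2, 3, 4]   # sorted by rev, hence topologically
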
625 625 def nodesbetween(self, roots=None, heads=None):
626 626 """Return a tuple containing three elements. Elements 1 and 2 contain
627 627 a final list of bases and heads after all the unreachable ones have been
628 628 pruned. Element 0 contains a topologically sorted list of all
629 629 nodes that satisfy these constraints:
630 630
631 631 1. All nodes must be descended from a node in roots (the nodes on
632 632 roots are considered descended from themselves).
633 633 2. All nodes must also be ancestors of a node in heads (the nodes in
634 634 heads are considered to be their own ancestors).
635 635
636 636 If roots is unspecified, nullid is assumed as the only root.
637 637 If heads is unspecified, it is taken to be the output of the
638 638 heads method (i.e. a list of all nodes in the repository that
639 639 have no children)."""
640 640 nonodes = ([], [], [])
641 641 if roots is not None:
642 642 roots = list(roots)
643 643 if not roots:
644 644 return nonodes
645 645 lowestrev = min([self.rev(n) for n in roots])
646 646 else:
647 647 roots = [nullid] # Everybody's a descendent of nullid
648 648 lowestrev = nullrev
649 649 if (lowestrev == nullrev) and (heads is None):
650 650 # We want _all_ the nodes!
651 651 return ([self.node(r) for r in self], [nullid], list(self.heads()))
652 652 if heads is None:
653 653 # All nodes are ancestors, so the latest ancestor is the last
654 654 # node.
655 655 highestrev = len(self) - 1
656 656 # Set ancestors to None to signal that every node is an ancestor.
657 657 ancestors = None
658 658 # Set heads to an empty dictionary for later discovery of heads
659 659 heads = {}
660 660 else:
661 661 heads = list(heads)
662 662 if not heads:
663 663 return nonodes
664 664 ancestors = {}
665 665 # Turn heads into a dictionary so we can remove 'fake' heads.
666 666 # Also, later we will be using it to filter out the heads we can't
667 667 # find from roots.
668 668 heads = dict.fromkeys(heads, 0)
669 669 # Start at the top and keep marking parents until we're done.
670 670 nodestotag = heads.keys()
671 671 # Remember where the top was so we can use it as a limit later.
672 672 highestrev = max([self.rev(n) for n in nodestotag])
673 673 while nodestotag:
674 674 # grab a node to tag
675 675 n = nodestotag.pop()
676 676 # Never tag nullid
677 677 if n == nullid:
678 678 continue
679 679 # A node's revision number represents its place in a
680 680 # topologically sorted list of nodes.
681 681 r = self.rev(n)
682 682 if r >= lowestrev:
683 683 if n not in ancestors:
684 684 # If we are possibly a descendent of one of the roots
685 685 # and we haven't already been marked as an ancestor
686 686 ancestors[n] = 1 # Mark as ancestor
687 687 # Add non-nullid parents to list of nodes to tag.
688 688 nodestotag.extend([p for p in self.parents(n) if
689 689 p != nullid])
690 690 elif n in heads: # We've seen it before, is it a fake head?
691 691 # So it is, real heads should not be the ancestors of
692 692 # any other heads.
693 693 heads.pop(n)
694 694 if not ancestors:
695 695 return nonodes
696 696 # Now that we have our set of ancestors, we want to remove any
697 697 # roots that are not ancestors.
698 698
699 699 # If one of the roots was nullid, everything is included anyway.
700 700 if lowestrev > nullrev:
701 701 # But, since we weren't, let's recompute the lowest rev to not
702 702 # include roots that aren't ancestors.
703 703
704 704 # Filter out roots that aren't ancestors of heads
705 705 roots = [n for n in roots if n in ancestors]
706 706 # Recompute the lowest revision
707 707 if roots:
708 708 lowestrev = min([self.rev(n) for n in roots])
709 709 else:
710 710 # No more roots? Return empty list
711 711 return nonodes
712 712 else:
713 713 # We are descending from nullid, and don't need to care about
714 714 # any other roots.
715 715 lowestrev = nullrev
716 716 roots = [nullid]
717 717 # Transform our roots list into a 'set' (i.e. a dictionary where the
718 718 # values don't matter).
719 719 descendents = dict.fromkeys(roots, 1)
720 720 # Also, keep the original roots so we can filter out roots that aren't
721 721 # 'real' roots (i.e. are descended from other roots).
722 722 roots = descendents.copy()
723 723 # Our topologically sorted list of output nodes.
724 724 orderedout = []
725 725 # Don't start at nullid since we don't want nullid in our output list,
726 726 # and if nullid shows up in descendents, empty parents will look like
727 727 # they're descendents.
728 728 for r in xrange(max(lowestrev, 0), highestrev + 1):
729 729 n = self.node(r)
730 730 isdescendent = False
731 731 if lowestrev == nullrev: # Everybody is a descendent of nullid
732 732 isdescendent = True
733 733 elif n in descendents:
734 734 # n is already a descendent
735 735 isdescendent = True
736 736 # This check only needs to be done here because all the roots
737 737 # will start being marked as descendents before the loop.
738 738 if n in roots:
739 739 # If n was a root, check if it's a 'real' root.
740 740 p = tuple(self.parents(n))
741 741 # If any of its parents are descendents, it's not a root.
742 742 if (p[0] in descendents) or (p[1] in descendents):
743 743 roots.pop(n)
744 744 else:
745 745 p = tuple(self.parents(n))
746 746 # A node is a descendent if either of its parents is a
747 747 # descendent. (We seeded the descendents list with the roots
748 748 # up there, remember?)
749 749 if (p[0] in descendents) or (p[1] in descendents):
750 750 descendents[n] = 1
751 751 isdescendent = True
752 752 if isdescendent and ((ancestors is None) or (n in ancestors)):
753 753 # Only include nodes that are both descendents and ancestors.
754 754 orderedout.append(n)
755 755 if (ancestors is not None) and (n in heads):
756 756 # We're trying to figure out which heads are reachable
757 757 # from roots.
758 758 # Mark this head as having been reached
759 759 heads[n] = 1
760 760 elif ancestors is None:
761 761 # Otherwise, we're trying to discover the heads.
762 762 # Assume this is a head because if it isn't, the next step
763 763 # will eventually remove it.
764 764 heads[n] = 1
765 765 # But, obviously its parents aren't.
766 766 for p in self.parents(n):
767 767 heads.pop(p, None)
768 768 heads = [n for n in heads.iterkeys() if heads[n] != 0]
769 769 roots = roots.keys()
770 770 assert orderedout
771 771 assert roots
772 772 assert heads
773 773 return (orderedout, roots, heads)
774 774
775 775 def heads(self, start=None, stop=None):
776 776 """return the list of all nodes that have no children
777 777
778 778 if start is specified, only heads that are descendants of
779 779 start will be returned
780 780 if stop is specified, it will consider all the revs from stop
781 781 as if they had no children
782 782 """
783 783 if start is None and stop is None:
784 784 count = len(self)
785 785 if not count:
786 786 return [nullid]
787 787 ishead = [1] * (count + 1)
788 788 index = self.index
789 789 for r in xrange(count):
790 790 e = index[r]
791 791 ishead[e[5]] = ishead[e[6]] = 0
792 792 return [self.node(r) for r in xrange(count) if ishead[r]]
793 793
794 794 if start is None:
795 795 start = nullid
796 796 if stop is None:
797 797 stop = []
798 798 stoprevs = dict.fromkeys([self.rev(n) for n in stop])
799 799 startrev = self.rev(start)
800 800 reachable = {startrev: 1}
801 801 heads = {startrev: 1}
802 802
803 803 parentrevs = self.parentrevs
804 804 for r in xrange(startrev + 1, len(self)):
805 805 for p in parentrevs(r):
806 806 if p in reachable:
807 807 if r not in stoprevs:
808 808 reachable[r] = 1
809 809 heads[r] = 1
810 810 if p in heads and p not in stoprevs:
811 811 del heads[p]
812 812
813 813 return [self.node(r) for r in heads]
814 814
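The fast path above rests on a single pass: every rev starts out a head and loses that status as soon as it appears as someone's parent. In miniature, over an invented parent table:

    # rev -> (p1, p2) with -1 for the null parent; revs 2 and 3 extend rev 1
    toyparentrevs = [(-1, -1), (0, -1), (1, -1), (1, -1)]
    ishead = [1] * len(toyparentrevs)
    for p1, p2 in toyparentrevs:
        for p in (p1, p2):
            if p != -1:
                ishead[p] = 0
    assert [r for r in range(len(ishead)) if ishead[r]] == [2, 3]
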
815 815 def children(self, node):
816 816 """find the children of a given node"""
817 817 c = []
818 818 p = self.rev(node)
819 819 for r in range(p + 1, len(self)):
820 820 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
821 821 if prevs:
822 822 for pr in prevs:
823 823 if pr == p:
824 824 c.append(self.node(r))
825 825 elif p == nullrev:
826 826 c.append(self.node(r))
827 827 return c
828 828
829 829 def _match(self, id):
830 830 if isinstance(id, (long, int)):
831 831 # rev
832 832 return self.node(id)
833 833 if len(id) == 20:
834 834 # possibly a binary node
835 835 # odds of a binary node being all hex in ASCII are 1 in 10**25
836 836 try:
837 837 node = id
838 r = self.rev(node) # quick search the index
838 self.rev(node) # quick search the index
839 839 return node
840 840 except LookupError:
841 841 pass # may be partial hex id
842 842 try:
843 843 # str(rev)
844 844 rev = int(id)
845 845 if str(rev) != id:
846 846 raise ValueError
847 847 if rev < 0:
848 848 rev = len(self) + rev
849 849 if rev < 0 or rev >= len(self):
850 850 raise ValueError
851 851 return self.node(rev)
852 852 except (ValueError, OverflowError):
853 853 pass
854 854 if len(id) == 40:
855 855 try:
856 856 # a full hex nodeid?
857 857 node = bin(id)
858 r = self.rev(node)
858 self.rev(node)
859 859 return node
860 860 except (TypeError, LookupError):
861 861 pass
862 862
863 863 def _partialmatch(self, id):
864 864 if len(id) < 40:
865 865 try:
866 866 # hex(node)[:...]
867 867 l = len(id) / 2 # grab an even number of digits
868 868 bin_id = bin(id[:l*2])
869 869 nl = [n for n in self.nodemap if n[:l] == bin_id]
870 870 nl = [n for n in nl if hex(n).startswith(id)]
871 871 if len(nl) > 0:
872 872 if len(nl) == 1:
873 873 return nl[0]
874 874 raise LookupError(id, self.indexfile,
875 875 _('ambiguous identifier'))
876 876 return None
877 877 except TypeError:
878 878 pass
879 879
880 880 def lookup(self, id):
881 881 """locate a node based on:
882 882 - revision number or str(revision number)
883 883 - nodeid or subset of hex nodeid
884 884 """
885 885 n = self._match(id)
886 886 if n is not None:
887 887 return n
888 888 n = self._partialmatch(id)
889 889 if n:
890 890 return n
891 891
892 892 raise LookupError(id, self.indexfile, _('no match found'))
893 893
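What _partialmatch does with a short hex prefix, reduced to a sketch: compare an even number of digits in binary, then filter on the full hex string, and treat more than one survivor as ambiguous. The helper below is hypothetical and works against a plain dict:

    import binascii

    def toypartialmatch(nodemap, hexid):
        l = len(hexid) // 2                      # an even number of digits
        binprefix = binascii.unhexlify(hexid[:l * 2])
        nl = [n for n in nodemap if n[:l] == binprefix and
              binascii.hexlify(n).decode('ascii').startswith(hexid)]
        if len(nl) > 1:
            raise KeyError('ambiguous identifier ' + hexid)
        return nl[0] if nl else None

    node = binascii.unhexlify('11' * 20)
    assert toypartialmatch({node: 0}, '111') == node
    assert toypartialmatch({node: 0}, 'ff') is None
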
894 894 def cmp(self, node, text):
895 895 """compare text with a given file revision"""
896 896 p1, p2 = self.parents(node)
897 897 return hash(text, p1, p2) != node
898 898
899 899 def chunk(self, rev, df=None):
900 900 def loadcache(df):
901 901 if not df:
902 902 if self._inline:
903 903 df = self.opener(self.indexfile)
904 904 else:
905 905 df = self.opener(self.datafile)
906 906 df.seek(start)
907 907 self._chunkcache = (start, df.read(cache_length))
908 908
909 909 start, length = self.start(rev), self.length(rev)
910 910 if self._inline:
911 911 start += (rev + 1) * self._io.size
912 912 end = start + length
913 913
914 914 offset = 0
915 915 if not self._chunkcache:
916 916 cache_length = max(65536, length)
917 917 loadcache(df)
918 918 else:
919 919 cache_start = self._chunkcache[0]
920 920 cache_length = len(self._chunkcache[1])
921 921 cache_end = cache_start + cache_length
922 922 if start >= cache_start and end <= cache_end:
923 923 # it is cached
924 924 offset = start - cache_start
925 925 else:
926 926 cache_length = max(65536, length)
927 927 loadcache(df)
928 928
929 929 # avoid copying large chunks
930 930 c = self._chunkcache[1]
931 931 if cache_length != length:
932 932 c = c[offset:offset + length]
933 933
934 934 return decompress(c)
935 935
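The caching policy in chunk() boils down to: hold one contiguous window of the file, at least 64 kB, and serve any read that falls entirely inside it. A standalone sketch of that policy, with invented names:

    import io

    class windowedreader(object):
        minwindow = 65536                  # same floor as the 65536 above

        def __init__(self, fp):
            self.fp = fp
            self.cache = None              # (start, data) or None

        def read(self, start, length):
            if self.cache:
                cstart, data = self.cache
                if cstart <= start and start + length <= cstart + len(data):
                    off = start - cstart   # hit: slice out of the window
                    return data[off:off + length]
            self.fp.seek(start)            # miss: reload a bigger window
            data = self.fp.read(max(self.minwindow, length))
            self.cache = (start, data)
            return data[:length]

    r = windowedreader(io.BytesIO(b'abcdef' * 20000))
    assert r.read(2, 3) == b'cde'
    assert r.read(10, 2) == b'ef'          # served from the cached window
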
936 936 def revdiff(self, rev1, rev2):
937 937 """return or calculate a delta between two revisions"""
938 938 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
939 939 return self.chunk(rev2)
940 940
941 941 return mdiff.textdiff(self.revision(self.node(rev1)),
942 942 self.revision(self.node(rev2)))
943 943
944 944 def revision(self, node):
945 945 """return an uncompressed revision of a given node"""
946 946 if node == nullid:
947 947 return ""
948 948 if self._cache and self._cache[0] == node:
949 949 return str(self._cache[2])
950 950
951 951 # look up what we need to read
952 952 text = None
953 953 rev = self.rev(node)
954 954 base = self.base(rev)
955 955
956 956 # check rev flags
957 957 if self.index[rev][0] & 0xFFFF:
958 958 raise RevlogError(_('incompatible revision flag %x') %
959 959 (self.index[rev][0] & 0xFFFF))
960 960
961 961 df = None
962 962
963 963 # do we have useful data cached?
964 964 if self._cache and self._cache[1] >= base and self._cache[1] < rev:
965 965 base = self._cache[1]
966 966 text = str(self._cache[2])
967 967 self._loadindex(base, rev + 1)
968 968 if not self._inline and rev > base + 1:
969 969 df = self.opener(self.datafile)
970 970 else:
971 971 self._loadindex(base, rev + 1)
972 972 if not self._inline and rev > base:
973 973 df = self.opener(self.datafile)
974 974 text = self.chunk(base, df=df)
975 975
976 976 bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
977 977 text = mdiff.patches(text, bins)
978 978 p1, p2 = self.parents(node)
979 979 if node != hash(text, p1, p2):
980 980 raise RevlogError(_("integrity check failed on %s:%d")
981 981 % (self.datafile, rev))
982 982
983 983 self._cache = (node, rev, text)
984 984 return text
985 985
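The integrity check at the end of revision() recomputes the node from the text and its parents; Mercurial's node hash is SHA-1 over the two parent nodes in sorted order followed by the text. A minimal sketch of that check:

    import hashlib

    def nodehash(text, p1, p2):
        # sorting the parents makes the hash independent of parent order
        s = hashlib.sha1(min(p1, p2))
        s.update(max(p1, p2))
        s.update(text)
        return s.digest()

    nullid = b'\x00' * 20
    n = nodehash(b'some revision text\n', nullid, nullid)
    assert len(n) == 20
    assert n == nodehash(b'some revision text\n', nullid, nullid)
    assert n != nodehash(b'tampered text\n', nullid, nullid)
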
986 986 def checkinlinesize(self, tr, fp=None):
987 987 if not self._inline:
988 988 return
989 989 if not fp:
990 990 fp = self.opener(self.indexfile, 'r')
991 991 fp.seek(0, 2)
992 992 size = fp.tell()
993 993 if size < 131072:
994 994 return
995 995 trinfo = tr.find(self.indexfile)
996 996 if trinfo is None:
997 997 raise RevlogError(_("%s not found in the transaction")
998 998 % self.indexfile)
999 999
1000 1000 trindex = trinfo[2]
1001 1001 dataoff = self.start(trindex)
1002 1002
1003 1003 tr.add(self.datafile, dataoff)
1004 1004 df = self.opener(self.datafile, 'w')
1005 1005 try:
1006 1006 calc = self._io.size
1007 1007 for r in self:
1008 1008 start = self.start(r) + (r + 1) * calc
1009 1009 length = self.length(r)
1010 1010 fp.seek(start)
1011 1011 d = fp.read(length)
1012 1012 df.write(d)
1013 1013 finally:
1014 1014 df.close()
1015 1015
1016 1016 fp.close()
1017 1017 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1018 1018 self.version &= ~(REVLOGNGINLINEDATA)
1019 1019 self._inline = False
1020 1020 for i in self:
1021 1021 e = self._io.packentry(self.index[i], self.node, self.version, i)
1022 1022 fp.write(e)
1023 1023
1024 1024 # if we don't call rename, the temp file will never replace the
1025 1025 # real index
1026 1026 fp.rename()
1027 1027
1028 1028 tr.replace(self.indexfile, trindex * calc)
1029 1029 self._chunkcache = None
1030 1030
1031 1031 def addrevision(self, text, transaction, link, p1, p2, d=None):
1032 1032 """add a revision to the log
1033 1033
1034 1034 text - the revision data to add
1035 1035 transaction - the transaction object used for rollback
1036 1036 link - the linkrev data to add
1037 1037 p1, p2 - the parent nodeids of the revision
1038 1038 d - an optional precomputed delta
1039 1039 """
1040 1040 dfh = None
1041 1041 if not self._inline:
1042 1042 dfh = self.opener(self.datafile, "a")
1043 1043 ifh = self.opener(self.indexfile, "a+")
1044 1044 try:
1045 1045 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1046 1046 finally:
1047 1047 if dfh:
1048 1048 dfh.close()
1049 1049 ifh.close()
1050 1050
1051 1051 def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
1052 1052 node = hash(text, p1, p2)
1053 1053 if node in self.nodemap:
1054 1054 return node
1055 1055
1056 1056 curr = len(self)
1057 1057 prev = curr - 1
1058 1058 base = self.base(prev)
1059 1059 offset = self.end(prev)
1060 1060
1061 1061 if curr:
1062 1062 if not d:
1063 1063 ptext = self.revision(self.node(prev))
1064 1064 d = mdiff.textdiff(ptext, text)
1065 1065 data = compress(d)
1066 1066 l = len(data[1]) + len(data[0])
1067 1067 dist = l + offset - self.start(base)
1068 1068
1069 1069 # full versions are inserted when the needed deltas
1070 1070 # become comparable to the uncompressed text
1071 1071 if not curr or dist > len(text) * 2:
1072 1072 data = compress(text)
1073 1073 l = len(data[1]) + len(data[0])
1074 1074 base = curr
1075 1075
1076 1076 e = (offset_type(offset, 0), l, len(text),
1077 1077 base, link, self.rev(p1), self.rev(p2), node)
1078 1078 self.index.insert(-1, e)
1079 1079 self.nodemap[node] = curr
1080 1080
1081 1081 entry = self._io.packentry(e, self.node, self.version, curr)
1082 1082 if not self._inline:
1083 1083 transaction.add(self.datafile, offset)
1084 1084 transaction.add(self.indexfile, curr * len(entry))
1085 1085 if data[0]:
1086 1086 dfh.write(data[0])
1087 1087 dfh.write(data[1])
1088 1088 dfh.flush()
1089 1089 ifh.write(entry)
1090 1090 else:
1091 1091 offset += curr * self._io.size
1092 1092 transaction.add(self.indexfile, offset, curr)
1093 1093 ifh.write(entry)
1094 1094 ifh.write(data[0])
1095 1095 ifh.write(data[1])
1096 1096 self.checkinlinesize(transaction, ifh)
1097 1097
1098 1098 self._cache = (node, curr, text)
1099 1099 return node
1100 1100
1101 1101 def ancestor(self, a, b):
1102 1102 """calculate the least common ancestor of nodes a and b"""
1103 1103
1104 1104 def parents(rev):
1105 1105 return [p for p in self.parentrevs(rev) if p != nullrev]
1106 1106
1107 1107 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1108 1108 if c is None:
1109 1109 return nullid
1110 1110
1111 1111 return self.node(c)
1112 1112
1113 1113 def group(self, nodelist, lookup, infocollect=None):
1114 1114 """calculate a delta group
1115 1115
1116 1116 Given a list of changeset revs, return a set of deltas and
1117 1117 metadata corresponding to nodes. The first delta is
1118 1118 parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
1119 1119 have this parent as it has all history before these
1120 1120 changesets. parent is parent[0].
1121 1121 """
1122 1122 revs = [self.rev(n) for n in nodelist]
1123 1123
1124 1124 # if we don't have any revisions touched by these changesets, bail
1125 1125 if not revs:
1126 1126 yield changegroup.closechunk()
1127 1127 return
1128 1128
1129 1129 # add the parent of the first rev
1130 1130 p = self.parents(self.node(revs[0]))[0]
1131 1131 revs.insert(0, self.rev(p))
1132 1132
1133 1133 # build deltas
1134 1134 for d in xrange(0, len(revs) - 1):
1135 1135 a, b = revs[d], revs[d + 1]
1136 1136 nb = self.node(b)
1137 1137
1138 1138 if infocollect is not None:
1139 1139 infocollect(nb)
1140 1140
1141 1141 p = self.parents(nb)
1142 1142 meta = nb + p[0] + p[1] + lookup(nb)
1143 1143 if a == -1:
1144 1144 d = self.revision(nb)
1145 1145 meta += mdiff.trivialdiffheader(len(d))
1146 1146 else:
1147 1147 d = self.revdiff(a, b)
1148 1148 yield changegroup.chunkheader(len(meta) + len(d))
1149 1149 yield meta
1150 1150 if len(d) > 2**20:
1151 1151 pos = 0
1152 1152 while pos < len(d):
1153 1153 pos2 = pos + 2 ** 18
1154 1154 yield d[pos:pos2]
1155 1155 pos = pos2
1156 1156 else:
1157 1157 yield d
1158 1158
1159 1159 yield changegroup.closechunk()
1160 1160
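The stream emitted above is a sequence of length-prefixed chunks: chunkheader() writes a 4-byte big-endian length that counts itself plus the payload, and closechunk() writes a zero length as terminator. A reader and writer for that framing, assuming that self-inclusive length convention:

    import io
    import struct

    def writechunk(out, payload):
        out.write(struct.pack('>l', len(payload) + 4))   # length counts itself
        out.write(payload)

    def readchunks(inp):
        while True:
            l = struct.unpack('>l', inp.read(4))[0]
            if l <= 4:                                   # closechunk: done
                return
            yield inp.read(l - 4)

    buf = io.BytesIO()
    for part in (b'one', b'two'):
        writechunk(buf, part)
    buf.write(struct.pack('>l', 0))                      # terminator
    buf.seek(0)
    assert list(readchunks(buf)) == [b'one', b'two']
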
1161 1161 def addgroup(self, revs, linkmapper, transaction):
1162 1162 """
1163 1163 add a delta group
1164 1164
1165 1165 given a set of deltas, add them to the revision log. the
1166 1166 first delta is against its parent, which should be in our
1167 1167 log, the rest are against the previous delta.
1168 1168 """
1169 1169
1170 1170 # track the base of the current delta log
1171 1171 r = len(self)
1172 1172 t = r - 1
1173 1173 node = None
1174 1174
1175 1175 base = prev = nullrev
1176 1176 start = end = textlen = 0
1177 1177 if r:
1178 1178 end = self.end(t)
1179 1179
1180 1180 ifh = self.opener(self.indexfile, "a+")
1181 1181 isize = r * self._io.size
1182 1182 if self._inline:
1183 1183 transaction.add(self.indexfile, end + isize, r)
1184 1184 dfh = None
1185 1185 else:
1186 1186 transaction.add(self.indexfile, isize, r)
1187 1187 transaction.add(self.datafile, end)
1188 1188 dfh = self.opener(self.datafile, "a")
1189 1189
1190 1190 try:
1191 1191 # loop through our set of deltas
1192 1192 chain = None
1193 1193 for chunk in revs:
1194 1194 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
1195 1195 link = linkmapper(cs)
1196 1196 if node in self.nodemap:
1197 1197 # this can happen if two branches make the same change
1198 1198 chain = node
1199 1199 continue
1200 1200 delta = buffer(chunk, 80)
1201 1201 del chunk
1202 1202
1203 1203 for p in (p1, p2):
1204 1204 if p not in self.nodemap:
1205 1205 raise LookupError(p, self.indexfile, _('unknown parent'))
1206 1206
1207 1207 if not chain:
1208 1208 # retrieve the parent revision of the delta chain
1209 1209 chain = p1
1210 1210 if chain not in self.nodemap:
1211 1211 raise LookupError(chain, self.indexfile, _('unknown base'))
1212 1212
1213 1213 # full versions are inserted when the needed deltas become
1214 1214 # comparable to the uncompressed text or when the previous
1215 1215 # version is not the one we have a delta against. We use
1216 1216 # the size of the previous full rev as a proxy for the
1217 1217 # current size.
1218 1218
1219 1219 if chain == prev:
1220 1220 cdelta = compress(delta)
1221 1221 cdeltalen = len(cdelta[0]) + len(cdelta[1])
1222 1222 textlen = mdiff.patchedsize(textlen, delta)
1223 1223
1224 1224 if chain != prev or (end - start + cdeltalen) > textlen * 2:
1225 1225 # flush our writes here so we can read it in revision
1226 1226 if dfh:
1227 1227 dfh.flush()
1228 1228 ifh.flush()
1229 1229 text = self.revision(chain)
1230 1230 if len(text) == 0:
1231 1231 # skip over trivial delta header
1232 1232 text = buffer(delta, 12)
1233 1233 else:
1234 1234 text = mdiff.patches(text, [delta])
1235 1235 del delta
1236 1236 chk = self._addrevision(text, transaction, link, p1, p2, None,
1237 1237 ifh, dfh)
1238 1238 if not dfh and not self._inline:
1239 1239 # addrevision switched from inline to conventional
1240 1240 # reopen the data and index files
1241 1241 dfh = self.opener(self.datafile, "a")
1242 1242 ifh = self.opener(self.indexfile, "a")
1243 1243 if chk != node:
1244 1244 raise RevlogError(_("consistency error adding group"))
1245 1245 textlen = len(text)
1246 1246 else:
1247 1247 e = (offset_type(end, 0), cdeltalen, textlen, base,
1248 1248 link, self.rev(p1), self.rev(p2), node)
1249 1249 self.index.insert(-1, e)
1250 1250 self.nodemap[node] = r
1251 1251 entry = self._io.packentry(e, self.node, self.version, r)
1252 1252 if self._inline:
1253 1253 ifh.write(entry)
1254 1254 ifh.write(cdelta[0])
1255 1255 ifh.write(cdelta[1])
1256 1256 self.checkinlinesize(transaction, ifh)
1257 1257 if not self._inline:
1258 1258 dfh = self.opener(self.datafile, "a")
1259 1259 ifh = self.opener(self.indexfile, "a")
1260 1260 else:
1261 1261 dfh.write(cdelta[0])
1262 1262 dfh.write(cdelta[1])
1263 1263 ifh.write(entry)
1264 1264
1265 1265 t, r, chain, prev = r, r + 1, node, node
1266 1266 base = self.base(t)
1267 1267 start = self.start(base)
1268 1268 end = self.end(t)
1269 1269 finally:
1270 1270 if dfh:
1271 1271 dfh.close()
1272 1272 ifh.close()
1273 1273
1274 1274 return node
1275 1275
1276 1276 def strip(self, minlink):
1277 1277 """truncate the revlog on the first revision with a linkrev >= minlink
1278 1278
1279 1279 This function is called when we're stripping revision minlink and
1280 1280 its descendants from the repository.
1281 1281
1282 1282 We have to remove all revisions with linkrev >= minlink, because
1283 1283 the equivalent changelog revisions will be renumbered after the
1284 1284 strip.
1285 1285
1286 1286 So we truncate the revlog on the first of these revisions, and
1287 1287 trust that the caller has saved the revisions that shouldn't be
1288 1288 removed and that it'll readd them after this truncation.
1289 1289 """
1290 1290 if len(self) == 0:
1291 1291 return
1292 1292
1293 1293 if isinstance(self.index, lazyindex):
1294 1294 self._loadindexmap()
1295 1295
1296 1296 for rev in self:
1297 1297 if self.index[rev][4] >= minlink:
1298 1298 break
1299 1299 else:
1300 1300 return
1301 1301
1302 1302 # first truncate the files on disk
1303 1303 end = self.start(rev)
1304 1304 if not self._inline:
1305 1305 df = self.opener(self.datafile, "a")
1306 1306 df.truncate(end)
1307 1307 end = rev * self._io.size
1308 1308 else:
1309 1309 end += rev * self._io.size
1310 1310
1311 1311 indexf = self.opener(self.indexfile, "a")
1312 1312 indexf.truncate(end)
1313 1313
1314 1314 # then reset internal state in memory to forget those revisions
1315 1315 self._cache = None
1316 1316 self._chunkcache = None
1317 1317 for x in xrange(rev, len(self)):
1318 1318 del self.nodemap[self.node(x)]
1319 1319
1320 1320 del self.index[rev:-1]
1321 1321
1322 1322 def checksize(self):
1323 1323 expected = 0
1324 1324 if len(self):
1325 1325 expected = max(0, self.end(len(self) - 1))
1326 1326
1327 1327 try:
1328 1328 f = self.opener(self.datafile)
1329 1329 f.seek(0, 2)
1330 1330 actual = f.tell()
1331 1331 dd = actual - expected
1332 1332 except IOError, inst:
1333 1333 if inst.errno != errno.ENOENT:
1334 1334 raise
1335 1335 dd = 0
1336 1336
1337 1337 try:
1338 1338 f = self.opener(self.indexfile)
1339 1339 f.seek(0, 2)
1340 1340 actual = f.tell()
1341 1341 s = self._io.size
1342 1342 i = max(0, actual / s)
1343 1343 di = actual - (i * s)
1344 1344 if self._inline:
1345 1345 databytes = 0
1346 1346 for r in self:
1347 1347 databytes += max(0, self.length(r))
1348 1348 dd = 0
1349 1349 di = actual - len(self) * s - databytes
1350 1350 except IOError, inst:
1351 1351 if inst.errno != errno.ENOENT:
1352 1352 raise
1353 1353 di = 0
1354 1354
1355 1355 return (dd, di)
1356 1356
1357 1357 def files(self):
1358 1358 res = [ self.indexfile ]
1359 1359 if not self._inline:
1360 1360 res.append(self.datafile)
1361 1361 return res
@@ -1,244 +1,244 b''
1 1 # verify.py - repository integrity checking for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import revlog, util, error
11 11
12 12 def verify(repo):
13 13 lock = repo.lock()
14 14 try:
15 15 return _verify(repo)
16 16 finally:
17 17 del lock
18 18
19 19 def _verify(repo):
20 20 mflinkrevs = {}
21 21 filelinkrevs = {}
22 22 filenodes = {}
23 23 revisions = 0
24 24 badrevs = {}
25 25 errors = [0]
26 26 warnings = [0]
27 27 ui = repo.ui
28 28 cl = repo.changelog
29 29 mf = repo.manifest
30 30
31 31 if not repo.cancopy():
32 32 raise util.Abort(_("cannot verify bundle or remote repos"))
33 33
34 34 def err(linkrev, msg, filename=None):
35 35 if linkrev is not None:
36 36 badrevs[linkrev] = True
37 37 else:
38 38 linkrev = '?'
39 39 msg = "%s: %s" % (linkrev, msg)
40 40 if filename:
41 41 msg = "%s@%s" % (filename, msg)
42 42 ui.warn(" " + msg + "\n")
43 43 errors[0] += 1
44 44
45 45 def exc(linkrev, msg, inst, filename=None):
46 46 if isinstance(inst, KeyboardInterrupt):
47 47 ui.warn(_("interrupted"))
48 48 raise
49 49 err(linkrev, "%s: %s" % (msg, inst), filename)
50 50
51 51 def warn(msg):
52 52 ui.warn(msg + "\n")
53 53 warnings[0] += 1
54 54
55 55 def checklog(obj, name):
56 56 if not len(obj) and (havecl or havemf):
57 57 err(0, _("empty or missing %s") % name)
58 58 return
59 59
60 60 d = obj.checksize()
61 61 if d[0]:
62 62 err(None, _("data length off by %d bytes") % d[0], name)
63 63 if d[1]:
64 64 err(None, _("index contains %d extra bytes") % d[1], name)
65 65
66 66 if obj.version != revlog.REVLOGV0:
67 67 if not revlogv1:
68 68 warn(_("warning: `%s' uses revlog format 1") % name)
69 69 elif revlogv1:
70 70 warn(_("warning: `%s' uses revlog format 0") % name)
71 71
72 72 def checkentry(obj, i, node, seen, linkrevs, f):
73 73 lr = obj.linkrev(obj.rev(node))
74 74 if lr < 0 or (havecl and lr not in linkrevs):
75 75 t = "unexpected"
76 76 if lr < 0 or lr >= len(cl):
77 77 t = "nonexistent"
78 78 err(None, _("rev %d points to %s changeset %d") % (i, t, lr), f)
79 79 if linkrevs:
80 80 warn(_(" (expected %s)") % " ".join(map(str,linkrevs)))
81 81 lr = None # can't be trusted
82 82
83 83 try:
84 84 p1, p2 = obj.parents(node)
85 85 if p1 not in seen and p1 != nullid:
86 86 err(lr, _("unknown parent 1 %s of %s") %
87 87 (short(p1), short(node)), f)
88 88 if p2 not in seen and p2 != nullid:
89 89 err(lr, _("unknown parent 2 %s of %s") %
90 90 (short(p2), short(node)), f)
91 91 except Exception, inst:
92 92 exc(lr, _("checking parents of %s") % short(node), inst, f)
93 93
94 94 if node in seen:
95 95 err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
96 96 seen[node] = i
97 97 return lr
98 98
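checkentry's core rule is that a revision's linkrev must name an existing changeset that actually expects that revision. Reduced to invented toy data:

    # filelinkrevs: built while reading changesets, as in _verify above
    nchangesets = 2
    filelinkrevs = {'f': [0, 1]}     # changesets 0 and 1 each touch file 'f'

    def toycheckentry(f, filerev, linkrev):
        if linkrev < 0 or linkrev >= nchangesets:
            return 'rev %d points to nonexistent changeset %d' % (filerev,
                                                                  linkrev)
        if linkrev not in filelinkrevs[f]:
            return 'rev %d points to unexpected changeset %d' % (filerev,
                                                                 linkrev)

    assert toycheckentry('f', 0, 0) is None
    assert toycheckentry('f', 0, 7) is not None
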
99 99 revlogv1 = cl.version != revlog.REVLOGV0
100 100 if ui.verbose or not revlogv1:
101 101 ui.status(_("repository uses revlog format %d\n") %
102 102 (revlogv1 and 1 or 0))
103 103
104 104 havecl = len(cl) > 0
105 105 havemf = len(mf) > 0
106 106
107 107 ui.status(_("checking changesets\n"))
108 108 seen = {}
109 109 checklog(cl, "changelog")
110 110 for i in repo:
111 111 n = cl.node(i)
112 112 checkentry(cl, i, n, seen, [i], "changelog")
113 113
114 114 try:
115 115 changes = cl.read(n)
116 116 mflinkrevs.setdefault(changes[0], []).append(i)
117 117 for f in changes[3]:
118 118 filelinkrevs.setdefault(f, []).append(i)
119 119 except Exception, inst:
120 120 exc(i, _("unpacking changeset %s") % short(n), inst)
121 121
122 122 ui.status(_("checking manifests\n"))
123 123 seen = {}
124 124 checklog(mf, "manifest")
125 125 for i in mf:
126 126 n = mf.node(i)
127 127 lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
128 128 if n in mflinkrevs:
129 129 del mflinkrevs[n]
130 130
131 131 try:
132 132 for f, fn in mf.readdelta(n).iteritems():
133 133 if not f:
134 134 err(lr, _("file without name in manifest"))
135 135 elif f != "/dev/null":
136 136 fns = filenodes.setdefault(f, {})
137 137 if fn not in fns:
138 138 fns[fn] = i
139 139 except Exception, inst:
140 140 exc(lr, _("reading manifest delta %s") % short(n), inst)
141 141
142 142 ui.status(_("crosschecking files in changesets and manifests\n"))
143 143
144 144 if havemf:
145 145 for c, m in util.sort([(c, m) for m in mflinkrevs for c in mflinkrevs[m]]):
146 146 err(c, _("changeset refers to unknown manifest %s") % short(m))
147 147 del mflinkrevs
148 148
149 149 for f in util.sort(filelinkrevs):
150 150 if f not in filenodes:
151 151 lr = filelinkrevs[f][0]
152 152 err(lr, _("in changeset but not in manifest"), f)
153 153
154 154 if havecl:
155 155 for f in util.sort(filenodes):
156 156 if f not in filelinkrevs:
157 157 try:
158 158 fl = repo.file(f)
159 159 lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
160 160 except:
161 161 lr = None
162 162 err(lr, _("in manifest but not in changeset"), f)
163 163
164 164 ui.status(_("checking files\n"))
165 165
166 166 storefiles = {}
167 167 for f, f2, size in repo.store.datafiles():
168 168 if not f:
169 169 err(None, _("cannot decode filename '%s'") % f2)
170 170 elif size > 0:
171 171 storefiles[f] = True
172 172
173 173 files = util.sort(util.unique(filenodes.keys() + filelinkrevs.keys()))
174 174 for f in files:
175 175 lr = filelinkrevs[f][0]
176 176 try:
177 177 fl = repo.file(f)
178 178 except error.RevlogError, e:
179 179 err(lr, _("broken revlog! (%s)") % e, f)
180 180 continue
181 181
182 182 for ff in fl.files():
183 183 try:
184 184 del storefiles[ff]
185 185 except KeyError:
186 186 err(lr, _("missing revlog!"), ff)
187 187
188 188 checklog(fl, f)
189 189 seen = {}
190 190 for i in fl:
191 191 revisions += 1
192 192 n = fl.node(i)
193 193 lr = checkentry(fl, i, n, seen, filelinkrevs.get(f, []), f)
194 194 if f in filenodes:
195 195 if havemf and n not in filenodes[f]:
196 196 err(lr, _("%s not in manifests") % (short(n)), f)
197 197 else:
198 198 del filenodes[f][n]
199 199
200 200 # verify contents
201 201 try:
202 202 t = fl.read(n)
203 203 rp = fl.renamed(n)
204 204 if len(t) != fl.size(i):
205 205 if len(fl.revision(n)) != fl.size(i):
206 206 err(lr, _("unpacked size is %s, %s expected") %
207 207 (len(t), fl.size(i)), f)
208 208 except Exception, inst:
209 209 exc(lr, _("unpacking %s") % short(n), inst, f)
210 210
211 211 # check renames
212 212 try:
213 213 if rp:
214 214 fl2 = repo.file(rp[0])
215 215 if not len(fl2):
216 216 err(lr, _("empty or missing copy source revlog %s:%s")
217 217 % (rp[0], short(rp[1])), f)
218 218 elif rp[1] == nullid:
219 219 warn(_("warning: %s@%s: copy source revision is nullid %s:%s")
220 220 % (f, lr, rp[0], short(rp[1])))
221 221 else:
222 rev = fl2.rev(rp[1])
222 fl2.rev(rp[1])
223 223 except Exception, inst:
224 224 exc(lr, _("checking rename of %s") % short(n), inst, f)
225 225
226 226 # cross-check
227 227 if f in filenodes:
228 228 fns = [(mf.linkrev(l), n) for n,l in filenodes[f].iteritems()]
229 229 for lr, node in util.sort(fns):
230 230 err(lr, _("%s in manifests not found") % short(node), f)
231 231
232 232 for f in storefiles:
233 233 warn(_("warning: orphan revlog '%s'") % f)
234 234
235 235 ui.status(_("%d files, %d changesets, %d total revisions\n") %
236 236 (len(files), len(cl), revisions))
237 237 if warnings[0]:
238 238 ui.warn(_("%d warnings encountered!\n") % warnings[0])
239 239 if errors[0]:
240 240 ui.warn(_("%d integrity errors encountered!\n") % errors[0])
241 241 if badrevs:
242 242 ui.warn(_("(first damaged changeset appears to be %d)\n")
243 243 % min(badrevs))
244 244 return 1