More whitespace cleanups...
mpm@selenic.com
r575:7f5ce4bb default
byterange.py
@@ -1,452 +1,451
1 1 # This library is free software; you can redistribute it and/or
2 2 # modify it under the terms of the GNU Lesser General Public
3 3 # License as published by the Free Software Foundation; either
4 4 # version 2.1 of the License, or (at your option) any later version.
5 5 #
6 6 # This library is distributed in the hope that it will be useful,
7 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 9 # Lesser General Public License for more details.
10 10 #
11 11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, write to the
13 # Free Software Foundation, Inc.,
14 # 59 Temple Place, Suite 330,
12 # License along with this library; if not, write to the
13 # Free Software Foundation, Inc.,
14 # 59 Temple Place, Suite 330,
15 15 # Boston, MA 02111-1307 USA
16 16
17 17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
18 18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
19 19
20 20 # $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
21 21
22 22 import os
23 23 import stat
24 24 import urllib
25 25 import urllib2
26 26 import rfc822
27 27
28 try:
28 try:
29 29 from cStringIO import StringIO
30 except ImportError, msg:
30 except ImportError, msg:
31 31 from StringIO import StringIO
32 32
33 33 class RangeError(IOError):
34 34 """Error raised when an unsatisfiable range is requested."""
35 35 pass
36
36
37 37 class HTTPRangeHandler(urllib2.BaseHandler):
38 38 """Handler that enables HTTP Range headers.
39
39
40 40 This was extremely simple. The Range header is an HTTP feature to
41 begin with so all this class does is tell urllib2 that the
42 "206 Partial Content" response from the HTTP server is what we
41 begin with so all this class does is tell urllib2 that the
42 "206 Partial Content" response from the HTTP server is what we
43 43 expected.
44
44
45 45 Example:
46 46 import urllib2
47 47 import byterange
48
48
49 49 range_handler = range.HTTPRangeHandler()
50 50 opener = urllib2.build_opener(range_handler)
51
51
52 52 # install it
53 53 urllib2.install_opener(opener)
54
54
55 55 # create Request and set Range header
56 56 req = urllib2.Request('http://www.python.org/')
57 57 req.headers['Range'] = 'bytes=30-50'
58 58 f = urllib2.urlopen(req)
59 59 """
60
60
61 61 def http_error_206(self, req, fp, code, msg, hdrs):
62 62 # 206 Partial Content Response
63 63 r = urllib.addinfourl(fp, hdrs, req.get_full_url())
64 64 r.code = code
65 65 r.msg = msg
66 66 return r
67
67
68 68 def http_error_416(self, req, fp, code, msg, hdrs):
69 69 # HTTP's Range Not Satisfiable error
70 70 raise RangeError('Requested Range Not Satisfiable')
71 71
72 72 class RangeableFileObject:
73 73 """File object wrapper to enable raw range handling.
74 This was implemented primarily for handling range
75 specifications for file:// urls. This object effectively makes
76 a file object look like it consists only of a range of bytes in
74 This was implemented primarily for handling range
75 specifications for file:// urls. This object effectively makes
76 a file object look like it consists only of a range of bytes in
77 77 the stream.
78
78
79 79 Examples:
80 # expose 10 bytes, starting at byte position 20, from
80 # expose 10 bytes, starting at byte position 20, from
81 81 # /etc/passwd.
82 82 >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
83 83 # seek seeks within the range (to position 23 in this case)
84 84 >>> fo.seek(3)
85 85 # tell tells where you're at _within the range_ (position 3 in
86 86 # this case)
87 87 >>> fo.tell()
88 88 # read EOFs if an attempt is made to read past the last
89 89 # byte in the range. the following will return only 7 bytes.
90 90 >>> fo.read(30)
91 91 """
92
92
93 93 def __init__(self, fo, rangetup):
94 94 """Create a RangeableFileObject.
95 fo -- a file like object. only the read() method need be
96 supported but supporting an optimized seek() is
95 fo -- a file like object. only the read() method need be
96 supported but supporting an optimized seek() is
97 97 preferable.
98 98 rangetup -- a (firstbyte,lastbyte) tuple specifying the range
99 99 to work over.
100 100 The file object provided is assumed to be at byte offset 0.
101 101 """
102 102 self.fo = fo
103 103 (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
104 104 self.realpos = 0
105 105 self._do_seek(self.firstbyte)
106
106
107 107 def __getattr__(self, name):
108 108 """This effectively allows us to wrap at the instance level.
109 109 Any attribute not found in _this_ object will be searched for
110 110 in self.fo. This includes methods."""
111 111 if hasattr(self.fo, name):
112 112 return getattr(self.fo, name)
113 113 raise AttributeError, name
114
114
115 115 def tell(self):
116 116 """Return the position within the range.
117 This is different from fo.seek in that position 0 is the
117 This is different from fo.seek in that position 0 is the
118 118 first byte position of the range tuple. For example, if
119 119 this object was created with a range tuple of (500,899),
120 120 tell() will return 0 when at byte position 500 of the file.
121 121 """
122 122 return (self.realpos - self.firstbyte)
123
123
124 124 def seek(self,offset,whence=0):
125 125 """Seek within the byte range.
126 126 Positioning is identical to that described under tell().
127 127 """
128 128 assert whence in (0, 1, 2)
129 129 if whence == 0: # absolute seek
130 130 realoffset = self.firstbyte + offset
131 131 elif whence == 1: # relative seek
132 132 realoffset = self.realpos + offset
133 133 elif whence == 2: # absolute from end of file
134 134 # XXX: are we raising the right Error here?
135 135 raise IOError('seek from end of file not supported.')
136
136
137 137 # do not allow seek past lastbyte in range
138 138 if self.lastbyte and (realoffset >= self.lastbyte):
139 139 realoffset = self.lastbyte
140
140
141 141 self._do_seek(realoffset - self.realpos)
142
142
143 143 def read(self, size=-1):
144 144 """Read within the range.
145 145 This method will limit the size read based on the range.
146 146 """
147 147 size = self._calc_read_size(size)
148 148 rslt = self.fo.read(size)
149 149 self.realpos += len(rslt)
150 150 return rslt
151
151
152 152 def readline(self, size=-1):
153 153 """Read lines within the range.
154 154 This method will limit the size read based on the range.
155 155 """
156 156 size = self._calc_read_size(size)
157 157 rslt = self.fo.readline(size)
158 158 self.realpos += len(rslt)
159 159 return rslt
160
160
161 161 def _calc_read_size(self, size):
162 162 """Handles calculating the amount of data to read based on
163 163 the range.
164 164 """
165 165 if self.lastbyte:
166 166 if size > -1:
167 167 if ((self.realpos + size) >= self.lastbyte):
168 168 size = (self.lastbyte - self.realpos)
169 169 else:
170 170 size = (self.lastbyte - self.realpos)
171 171 return size
172
172
173 173 def _do_seek(self,offset):
174 174 """Seek based on whether wrapped object supports seek().
175 175 offset is relative to the current position (self.realpos).
176 176 """
177 177 assert offset >= 0
178 178 if not hasattr(self.fo, 'seek'):
179 179 self._poor_mans_seek(offset)
180 180 else:
181 181 self.fo.seek(self.realpos + offset)
182 182 self.realpos+= offset
183
183
184 184 def _poor_mans_seek(self,offset):
185 185 """Seek by calling the wrapped file objects read() method.
186 186 This is used for file like objects that do not have native
187 187 seek support. The wrapped object's read() method is called
188 188 to manually seek to the desired position.
189 189 offset -- read this number of bytes from the wrapped
190 190 file object.
191 raise RangeError if we encounter EOF before reaching the
191 raise RangeError if we encounter EOF before reaching the
192 192 specified offset.
193 193 """
194 194 pos = 0
195 195 bufsize = 1024
196 196 while pos < offset:
197 197 if (pos + bufsize) > offset:
198 198 bufsize = offset - pos
199 199 buf = self.fo.read(bufsize)
200 200 if len(buf) != bufsize:
201 201 raise RangeError('Requested Range Not Satisfiable')
202 202 pos+= bufsize
203 203
204 204 class FileRangeHandler(urllib2.FileHandler):
205 205 """FileHandler subclass that adds Range support.
206 206 This class handles Range headers exactly like an HTTP
207 207 server would.
208 208 """
209 209 def open_local_file(self, req):
210 210 import mimetypes
211 211 import mimetools
212 212 host = req.get_host()
213 213 file = req.get_selector()
214 214 localfile = urllib.url2pathname(file)
215 215 stats = os.stat(localfile)
216 216 size = stats[stat.ST_SIZE]
217 217 modified = rfc822.formatdate(stats[stat.ST_MTIME])
218 218 mtype = mimetypes.guess_type(file)[0]
219 219 if host:
220 220 host, port = urllib.splitport(host)
221 221 if port or socket.gethostbyname(host) not in self.get_names():
222 222 raise urllib2.URLError('file not on local host')
223 223 fo = open(localfile,'rb')
224 224 brange = req.headers.get('Range',None)
225 225 brange = range_header_to_tuple(brange)
226 226 assert brange != ()
227 227 if brange:
228 228 (fb,lb) = brange
229 229 if lb == '': lb = size
230 230 if fb < 0 or fb > size or lb > size:
231 231 raise RangeError('Requested Range Not Satisfiable')
232 232 size = (lb - fb)
233 233 fo = RangeableFileObject(fo, (fb,lb))
234 234 headers = mimetools.Message(StringIO(
235 235 'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
236 236 (mtype or 'text/plain', size, modified)))
237 237 return urllib.addinfourl(fo, headers, 'file:'+file)
238 238
239 239
240 # FTP Range Support
240 # FTP Range Support
241 241 # Unfortunately, a large amount of base FTP code had to be copied
242 242 # from urllib and urllib2 in order to insert the FTP REST command.
243 # Code modifications for range support have been commented as
243 # Code modifications for range support have been commented as
244 244 # follows:
245 245 # -- range support modifications start/end here
246 246
247 247 from urllib import splitport, splituser, splitpasswd, splitattr, \
248 248 unquote, addclosehook, addinfourl
249 249 import ftplib
250 250 import socket
251 251 import sys
252 252 import ftplib
253 253 import mimetypes
254 254 import mimetools
255 255
256 256 class FTPRangeHandler(urllib2.FTPHandler):
257 257 def ftp_open(self, req):
258 258 host = req.get_host()
259 259 if not host:
260 260 raise IOError, ('ftp error', 'no host given')
261 261 host, port = splitport(host)
262 262 if port is None:
263 263 port = ftplib.FTP_PORT
264 264
265 265 # username/password handling
266 266 user, host = splituser(host)
267 267 if user:
268 268 user, passwd = splitpasswd(user)
269 269 else:
270 270 passwd = None
271 271 host = unquote(host)
272 272 user = unquote(user or '')
273 273 passwd = unquote(passwd or '')
274
274
275 275 try:
276 276 host = socket.gethostbyname(host)
277 277 except socket.error, msg:
278 278 raise urllib2.URLError(msg)
279 279 path, attrs = splitattr(req.get_selector())
280 280 dirs = path.split('/')
281 281 dirs = map(unquote, dirs)
282 282 dirs, file = dirs[:-1], dirs[-1]
283 283 if dirs and not dirs[0]:
284 284 dirs = dirs[1:]
285 285 try:
286 286 fw = self.connect_ftp(user, passwd, host, port, dirs)
287 287 type = file and 'I' or 'D'
288 288 for attr in attrs:
289 289 attr, value = splitattr(attr)
290 290 if attr.lower() == 'type' and \
291 291 value in ('a', 'A', 'i', 'I', 'd', 'D'):
292 292 type = value.upper()
293
293
294 294 # -- range support modifications start here
295 295 rest = None
296 range_tup = range_header_to_tuple(req.headers.get('Range',None))
296 range_tup = range_header_to_tuple(req.headers.get('Range',None))
297 297 assert range_tup != ()
298 298 if range_tup:
299 299 (fb,lb) = range_tup
300 300 if fb > 0: rest = fb
301 301 # -- range support modifications end here
302
302
303 303 fp, retrlen = fw.retrfile(file, type, rest)
304
304
305 305 # -- range support modifications start here
306 306 if range_tup:
307 307 (fb,lb) = range_tup
308 if lb == '':
308 if lb == '':
309 309 if retrlen is None or retrlen == 0:
310 310 raise RangeError('Requested Range Not Satisfiable due to unobtainable file length.')
311 311 lb = retrlen
312 312 retrlen = lb - fb
313 313 if retrlen < 0:
314 314 # beginning of range is larger than file
315 315 raise RangeError('Requested Range Not Satisfiable')
316 316 else:
317 317 retrlen = lb - fb
318 318 fp = RangeableFileObject(fp, (0,retrlen))
319 319 # -- range support modifications end here
320
320
321 321 headers = ""
322 322 mtype = mimetypes.guess_type(req.get_full_url())[0]
323 323 if mtype:
324 324 headers += "Content-Type: %s\n" % mtype
325 325 if retrlen is not None and retrlen >= 0:
326 326 headers += "Content-Length: %d\n" % retrlen
327 327 sf = StringIO(headers)
328 328 headers = mimetools.Message(sf)
329 329 return addinfourl(fp, headers, req.get_full_url())
330 330 except ftplib.all_errors, msg:
331 331 raise IOError, ('ftp error', msg), sys.exc_info()[2]
332 332
333 333 def connect_ftp(self, user, passwd, host, port, dirs):
334 334 fw = ftpwrapper(user, passwd, host, port, dirs)
335 335 return fw
336 336
337 337 class ftpwrapper(urllib.ftpwrapper):
338 338 # range support note:
339 339 # this ftpwrapper code is copied directly from
340 340 # urllib. The only enhancement is to add the rest
341 341 # argument and pass it on to ftp.ntransfercmd
342 342 def retrfile(self, file, type, rest=None):
343 343 self.endtransfer()
344 344 if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
345 345 else: cmd = 'TYPE ' + type; isdir = 0
346 346 try:
347 347 self.ftp.voidcmd(cmd)
348 348 except ftplib.all_errors:
349 349 self.init()
350 350 self.ftp.voidcmd(cmd)
351 351 conn = None
352 352 if file and not isdir:
353 353 # Use nlst to see if the file exists at all
354 354 try:
355 355 self.ftp.nlst(file)
356 356 except ftplib.error_perm, reason:
357 357 raise IOError, ('ftp error', reason), sys.exc_info()[2]
358 358 # Restore the transfer mode!
359 359 self.ftp.voidcmd(cmd)
360 360 # Try to retrieve as a file
361 361 try:
362 362 cmd = 'RETR ' + file
363 363 conn = self.ftp.ntransfercmd(cmd, rest)
364 364 except ftplib.error_perm, reason:
365 365 if str(reason)[:3] == '501':
366 366 # workaround for REST not supported error
367 367 fp, retrlen = self.retrfile(file, type)
368 368 fp = RangeableFileObject(fp, (rest,''))
369 369 return (fp, retrlen)
370 370 elif str(reason)[:3] != '550':
371 371 raise IOError, ('ftp error', reason), sys.exc_info()[2]
372 372 if not conn:
373 373 # Set transfer mode to ASCII!
374 374 self.ftp.voidcmd('TYPE A')
375 375 # Try a directory listing
376 376 if file: cmd = 'LIST ' + file
377 377 else: cmd = 'LIST'
378 378 conn = self.ftp.ntransfercmd(cmd)
379 379 self.busy = 1
380 380 # Pass back both a suitably decorated object and a retrieval length
381 381 return (addclosehook(conn[0].makefile('rb'),
382 382 self.endtransfer), conn[1])
383 383
384 384
385 385 ####################################################################
386 386 # Range Tuple Functions
387 387 # XXX: These range tuple functions might go better in a class.
388 388
389 389 _rangere = None
390 390 def range_header_to_tuple(range_header):
391 391 """Get a (firstbyte,lastbyte) tuple from a Range header value.
392
392
393 393 Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
394 394 function pulls the firstbyte and lastbyte values and returns
395 395 a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
396 396 the header value, it is returned as an empty string in the
397 397 tuple.
398
398
399 399 Return None if range_header is None
400 Return () if range_header does not conform to the range spec
400 Return () if range_header does not conform to the range spec
401 401 pattern.
402
402
403 403 """
404 404 global _rangere
405 405 if range_header is None: return None
406 406 if _rangere is None:
407 407 import re
408 408 _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
409 409 match = _rangere.match(range_header)
410 if match:
410 if match:
411 411 tup = range_tuple_normalize(match.group(1,2))
412 if tup and tup[1]:
412 if tup and tup[1]:
413 413 tup = (tup[0],tup[1]+1)
414 414 return tup
415 415 return ()
416 416
417 417 def range_tuple_to_header(range_tup):
418 418 """Convert a range tuple to a Range header value.
419 419 Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
420 420 if no range is needed.
421 421 """
422 422 if range_tup is None: return None
423 423 range_tup = range_tuple_normalize(range_tup)
424 424 if range_tup:
425 if range_tup[1]:
425 if range_tup[1]:
426 426 range_tup = (range_tup[0],range_tup[1] - 1)
427 427 return 'bytes=%s-%s' % range_tup
428
428
429 429 def range_tuple_normalize(range_tup):
430 430 """Normalize a (first_byte,last_byte) range tuple.
431 431 Return a tuple whose first element is guaranteed to be an int
432 and whose second element will be '' (meaning: the last byte) or
432 and whose second element will be '' (meaning: the last byte) or
433 433 an int. Finally, return None if the normalized tuple == (0,'')
434 434 as that is equivalent to retrieving the entire file.
435 435 """
436 436 if range_tup is None: return None
437 437 # handle first byte
438 438 fb = range_tup[0]
439 439 if fb in (None,''): fb = 0
440 440 else: fb = int(fb)
441 441 # handle last byte
442 442 try: lb = range_tup[1]
443 443 except IndexError: lb = ''
444 else:
444 else:
445 445 if lb is None: lb = ''
446 446 elif lb != '': lb = int(lb)
447 447 # check if range is over the entire file
448 448 if (fb,lb) == (0,''): return None
449 449 # check that the range is valid
450 450 if lb < fb: raise RangeError('Invalid byte range: %s-%s' % (fb,lb))
451 451 return (fb,lb)
452
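
For reference, a minimal usage sketch assembled from the HTTPRangeHandler docstring above; this is a sketch only, using the Python 2 era urllib2 API, and the import name byterange for this module is an assumption:

import urllib2
import byterange  # assumed import name for this module

# Build and install an opener that accepts "206 Partial Content" replies.
range_handler = byterange.HTTPRangeHandler()
opener = urllib2.build_opener(range_handler)
urllib2.install_opener(opener)

# Request only bytes 30-50; an unsatisfiable range (HTTP 416) is raised
# by the handler as byterange.RangeError.
req = urllib2.Request('http://www.python.org/')
req.add_header('Range', 'bytes=30-50')
f = urllib2.urlopen(req)
data = f.read()

FileRangeHandler and FTPRangeHandler defined above plug into build_opener the same way to get byte-range behaviour for file:// and ftp:// URLs.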
hg.py
@@ -1,1524 +1,1524
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff")
14 14
15 15 class filelog(revlog):
16 16 def __init__(self, opener, path):
17 17 revlog.__init__(self, opener,
18 18 os.path.join("data", path + ".i"),
19 19 os.path.join("data", path + ".d"))
20 20
21 21 def read(self, node):
22 22 t = self.revision(node)
23 23 if t[:2] != '\1\n':
24 24 return t
25 25 s = t.find('\1\n', 2)
26 26 return t[s+2:]
27 27
28 28 def readmeta(self, node):
29 29 t = self.revision(node)
30 30 if t[:2] != '\1\n':
31 31 return t
32 32 s = t.find('\1\n', 2)
33 33 m, mt = {}, t[2:s]
34 34 for l in mt.splitlines():
35 35 k, v = l.split(": ", 1)
36 36 m[k] = v
37 37 return m
38 38
39 39 def add(self, text, meta, transaction, link, p1=None, p2=None):
40 40 if meta or text[:2] == '\1\n':
41 41 mt = ""
42 42 if meta:
43 43 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
44 44 text = "\1\n" + "".join(mt) + "\1\n" + text
45 45 return self.addrevision(text, transaction, link, p1, p2)
46 46
47 47 def annotate(self, node):
48 48
49 49 def decorate(text, rev):
50 50 return ([rev] * len(text.splitlines()), text)
51 51
52 52 def pair(parent, child):
53 53 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
54 54 child[0][b1:b2] = parent[0][a1:a2]
55 55 return child
56 56
57 57 # find all ancestors
58 58 needed = {node:1}
59 59 visit = [node]
60 60 while visit:
61 61 n = visit.pop(0)
62 62 for p in self.parents(n):
63 63 if p not in needed:
64 64 needed[p] = 1
65 65 visit.append(p)
66 66 else:
67 67 # count how many times we'll use this
68 68 needed[p] += 1
69 69
70 70 # sort by revision which is a topological order
71 71 visit = [ (self.rev(n), n) for n in needed.keys() ]
72 72 visit.sort()
73 73 hist = {}
74 74
75 75 for r,n in visit:
76 76 curr = decorate(self.read(n), self.linkrev(n))
77 77 for p in self.parents(n):
78 78 if p != nullid:
79 79 curr = pair(hist[p], curr)
80 80 # trim the history of unneeded revs
81 81 needed[p] -= 1
82 82 if not needed[p]:
83 83 del hist[p]
84 84 hist[n] = curr
85 85
86 86 return zip(hist[n][0], hist[n][1].splitlines(1))
87 87
88 88 class manifest(revlog):
89 89 def __init__(self, opener):
90 90 self.mapcache = None
91 91 self.listcache = None
92 92 self.addlist = None
93 93 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
94 94
95 95 def read(self, node):
96 96 if node == nullid: return {} # don't upset local cache
97 97 if self.mapcache and self.mapcache[0] == node:
98 98 return self.mapcache[1]
99 99 text = self.revision(node)
100 100 map = {}
101 101 flag = {}
102 102 self.listcache = (text, text.splitlines(1))
103 103 for l in self.listcache[1]:
104 104 (f, n) = l.split('\0')
105 105 map[f] = bin(n[:40])
106 106 flag[f] = (n[40:-1] == "x")
107 107 self.mapcache = (node, map, flag)
108 108 return map
109 109
110 110 def readflags(self, node):
111 111 if node == nullid: return {} # don't upset local cache
112 112 if not self.mapcache or self.mapcache[0] != node:
113 113 self.read(node)
114 114 return self.mapcache[2]
115 115
116 116 def diff(self, a, b):
117 117 # this is sneaky, as we're not actually using a and b
118 118 if self.listcache and self.addlist and self.listcache[0] == a:
119 119 d = mdiff.diff(self.listcache[1], self.addlist, 1)
120 120 if mdiff.patch(a, d) != b:
121 121 sys.stderr.write("*** sortdiff failed, falling back ***\n")
122 122 return mdiff.textdiff(a, b)
123 123 return d
124 124 else:
125 125 return mdiff.textdiff(a, b)
126 126
127 127 def add(self, map, flags, transaction, link, p1=None, p2=None):
128 128 files = map.keys()
129 129 files.sort()
130 130
131 131 self.addlist = ["%s\000%s%s\n" %
132 132 (f, hex(map[f]), flags[f] and "x" or '')
133 133 for f in files]
134 134 text = "".join(self.addlist)
135 135
136 136 n = self.addrevision(text, transaction, link, p1, p2)
137 137 self.mapcache = (n, map, flags)
138 138 self.listcache = (text, self.addlist)
139 139 self.addlist = None
140 140
141 141 return n
142 142
143 143 class changelog(revlog):
144 144 def __init__(self, opener):
145 145 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
146 146
147 147 def extract(self, text):
148 148 if not text:
149 149 return (nullid, "", "0", [], "")
150 150 last = text.index("\n\n")
151 151 desc = text[last + 2:]
152 152 l = text[:last].splitlines()
153 153 manifest = bin(l[0])
154 154 user = l[1]
155 155 date = l[2]
156 156 files = l[3:]
157 157 return (manifest, user, date, files, desc)
158 158
159 159 def read(self, node):
160 160 return self.extract(self.revision(node))
161 161
162 162 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
163 163 user=None, date=None):
164 164 user = (user or
165 165 os.environ.get("HGUSER") or
166 166 os.environ.get("EMAIL") or
167 167 (os.environ.get("LOGNAME",
168 168 os.environ.get("USERNAME", "unknown"))
169 169 + '@' + socket.getfqdn()))
170 170 date = date or "%d %d" % (time.time(), time.timezone)
171 171 list.sort()
172 172 l = [hex(manifest), user, date] + list + ["", desc]
173 173 text = "\n".join(l)
174 174 return self.addrevision(text, transaction, self.count(), p1, p2)
175 175
176 176 class dirstate:
177 177 def __init__(self, opener, ui, root):
178 178 self.opener = opener
179 179 self.root = root
180 180 self.dirty = 0
181 181 self.ui = ui
182 182 self.map = None
183 183 self.pl = None
184 184 self.copies = {}
185 185
186 186 def __del__(self):
187 187 if self.dirty:
188 188 self.write()
189 189
190 190 def __getitem__(self, key):
191 191 try:
192 192 return self.map[key]
193 193 except TypeError:
194 194 self.read()
195 195 return self[key]
196 196
197 197 def __contains__(self, key):
198 198 if not self.map: self.read()
199 199 return key in self.map
200 200
201 201 def parents(self):
202 202 if not self.pl:
203 203 self.read()
204 204 return self.pl
205 205
206 206 def setparents(self, p1, p2 = nullid):
207 207 self.dirty = 1
208 208 self.pl = p1, p2
209 209
210 210 def state(self, key):
211 211 try:
212 212 return self[key][0]
213 213 except KeyError:
214 214 return "?"
215 215
216 216 def read(self):
217 217 if self.map is not None: return self.map
218 218
219 219 self.map = {}
220 220 self.pl = [nullid, nullid]
221 221 try:
222 222 st = self.opener("dirstate").read()
223 223 if not st: return
224 224 except: return
225 225
226 226 self.pl = [st[:20], st[20: 40]]
227 227
228 228 pos = 40
229 229 while pos < len(st):
230 230 e = struct.unpack(">cllll", st[pos:pos+17])
231 231 l = e[4]
232 232 pos += 17
233 233 f = st[pos:pos + l]
234 234 if '\0' in f:
235 235 f, c = f.split('\0')
236 236 self.copies[f] = c
237 237 self.map[f] = e[:4]
238 238 pos += l
239 239
240 240 def copy(self, source, dest):
241 241 self.read()
242 242 self.dirty = 1
243 243 self.copies[dest] = source
244 244
245 245 def copied(self, file):
246 246 return self.copies.get(file, None)
247 247
248 248 def update(self, files, state):
249 249 ''' current states:
250 250 n normal
251 251 m needs merging
252 252 r marked for removal
253 253 a marked for addition'''
254 254
255 255 if not files: return
256 256 self.read()
257 257 self.dirty = 1
258 258 for f in files:
259 259 if state == "r":
260 260 self.map[f] = ('r', 0, 0, 0)
261 261 else:
262 262 s = os.stat(os.path.join(self.root, f))
263 263 self.map[f] = (state, s.st_mode, s.st_size, s.st_mtime)
264 264
265 265 def forget(self, files):
266 266 if not files: return
267 267 self.read()
268 268 self.dirty = 1
269 269 for f in files:
270 270 try:
271 271 del self.map[f]
272 272 except KeyError:
273 273 self.ui.warn("not in dirstate: %s!\n" % f)
274 274 pass
275 275
276 276 def clear(self):
277 277 self.map = {}
278 278 self.dirty = 1
279 279
280 280 def write(self):
281 281 st = self.opener("dirstate", "w")
282 282 st.write("".join(self.pl))
283 283 for f, e in self.map.items():
284 284 c = self.copied(f)
285 285 if c:
286 286 f = f + "\0" + c
287 287 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
288 288 st.write(e + f)
289 289 self.dirty = 0
290 290
291 291 def changes(self, files, ignore):
292 292 self.read()
293 293 dc = self.map.copy()
294 294 lookup, changed, added, unknown = [], [], [], []
295 295
296 296 # compare all files by default
297 297 if not files: files = [self.root]
298 298
299 299 # recursive generator of all files listed
300 300 def walk(files):
301 301 for f in util.unique(files):
302 302 f = os.path.join(self.root, f)
303 303 if os.path.isdir(f):
304 304 for dir, subdirs, fl in os.walk(f):
305 305 d = dir[len(self.root) + 1:]
306 306 if ".hg" in subdirs: subdirs.remove(".hg")
307 307 for fn in fl:
308 308 fn = util.pconvert(os.path.join(d, fn))
309 309 yield fn
310 310 else:
311 311 yield f[len(self.root) + 1:]
312 312
313 313 for fn in util.unique(walk(files)):
314 314 try: s = os.stat(os.path.join(self.root, fn))
315 315 except: continue
316 316
317 317 if fn in dc:
318 318 c = dc[fn]
319 319 del dc[fn]
320 320
321 321 if c[0] == 'm':
322 322 changed.append(fn)
323 323 elif c[0] == 'a':
324 324 added.append(fn)
325 325 elif c[0] == 'r':
326 326 unknown.append(fn)
327 327 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
328 328 changed.append(fn)
329 329 elif c[1] != s.st_mode or c[3] != s.st_mtime:
330 330 lookup.append(fn)
331 331 else:
332 332 if not ignore(fn): unknown.append(fn)
333 333
334 334 return (lookup, changed, added, dc.keys(), unknown)
335 335
336 336 # used to avoid circular references so destructors work
337 337 def opener(base):
338 338 p = base
339 339 def o(path, mode="r"):
340 340 if p[:7] == "http://":
341 341 f = os.path.join(p, urllib.quote(path))
342 342 return httprangereader.httprangereader(f)
343 343
344 344 f = os.path.join(p, path)
345 345
346 346 mode += "b" # for that other OS
347 347
348 348 if mode[0] != "r":
349 349 try:
350 350 s = os.stat(f)
351 351 except OSError:
352 352 d = os.path.dirname(f)
353 353 if not os.path.isdir(d):
354 354 os.makedirs(d)
355 355 else:
356 356 if s.st_nlink > 1:
357 357 file(f + ".tmp", "wb").write(file(f, "rb").read())
358 358 util.rename(f+".tmp", f)
359 359
360 360 return file(f, mode)
361 361
362 362 return o
363 363
364 364 class RepoError(Exception): pass
365 365
366 366 class localrepository:
367 367 def __init__(self, ui, path=None, create=0):
368 368 self.remote = 0
369 369 if path and path[:7] == "http://":
370 370 self.remote = 1
371 371 self.path = path
372 372 else:
373 373 if not path:
374 374 p = os.getcwd()
375 375 while not os.path.isdir(os.path.join(p, ".hg")):
376 376 oldp = p
377 377 p = os.path.dirname(p)
378 378 if p == oldp: raise RepoError("no repo found")
379 379 path = p
380 380 self.path = os.path.join(path, ".hg")
381 381
382 382 if not create and not os.path.isdir(self.path):
383 383 raise RepoError("repository %s not found" % self.path)
384 384
385 385 self.root = path
386 386 self.ui = ui
387 387
388 388 if create:
389 389 os.mkdir(self.path)
390 390 os.mkdir(self.join("data"))
391 391
392 392 self.opener = opener(self.path)
393 393 self.wopener = opener(self.root)
394 394 self.manifest = manifest(self.opener)
395 395 self.changelog = changelog(self.opener)
396 396 self.ignorelist = None
397 397 self.tagscache = None
398 398 self.nodetagscache = None
399 399
400 400 if not self.remote:
401 401 self.dirstate = dirstate(self.opener, ui, self.root)
402 402 try:
403 403 self.ui.readconfig(self.opener("hgrc"))
404 404 except IOError: pass
405 405
406 406 def ignore(self, f):
407 407 if self.ignorelist is None:
408 408 self.ignorelist = []
409 409 try:
410 410 l = file(self.wjoin(".hgignore"))
411 411 for pat in l:
412 412 if pat != "\n":
413 413 self.ignorelist.append(re.compile(util.pconvert(pat[:-1])))
414 414 except IOError: pass
415 415 for pat in self.ignorelist:
416 416 if pat.search(f): return True
417 417 return False
418 418
419 419 def hook(self, name, **args):
420 420 s = self.ui.config("hooks", name)
421 421 if s:
422 422 self.ui.note("running hook %s: %s\n" % (name, s))
423 423 old = {}
424 424 for k, v in args.items():
425 425 k = k.upper()
426 426 old[k] = os.environ.get(k, None)
427 427 os.environ[k] = v
428 428
429 429 r = os.system(s)
430 430
431 431 for k, v in old.items():
432 432 if v != None:
433 433 os.environ[k] = v
434 434 else:
435 435 del os.environ[k]
436 436
437 437 if r:
438 438 self.ui.warn("abort: %s hook failed with status %d!\n" %
439 439 (name, r))
440 440 return False
441 441 return True
442 442
443 443 def tags(self):
444 444 '''return a mapping of tag to node'''
445 445 if not self.tagscache:
446 446 self.tagscache = {}
447 447 try:
448 448 # read each head of the tags file, ending with the tip
449 449 # and add each tag found to the map, with "newer" ones
450 450 # taking precedence
451 451 fl = self.file(".hgtags")
452 452 h = fl.heads()
453 453 h.reverse()
454 454 for r in h:
455 455 for l in fl.revision(r).splitlines():
456 456 if l:
457 457 n, k = l.split(" ", 1)
458 458 try:
459 459 bin_n = bin(n)
460 460 except TypeError:
461 461 bin_n = ''
462 462 self.tagscache[k.strip()] = bin_n
463 463 except KeyError:
464 464 pass
465 465 for k, n in self.ui.configitems("tags"):
466 466 try:
467 467 bin_n = bin(n)
468 468 except TypeError:
469 469 bin_n = ''
470 470 self.tagscache[k] = bin_n
471 471
472 472 self.tagscache['tip'] = self.changelog.tip()
473 473
474 474 return self.tagscache
475 475
476 476 def tagslist(self):
477 477 '''return a list of tags ordered by revision'''
478 478 l = []
479 479 for t, n in self.tags().items():
480 480 try:
481 481 r = self.changelog.rev(n)
482 482 except:
483 483 r = -2 # sort to the beginning of the list if unknown
484 484 l.append((r,t,n))
485 485 l.sort()
486 486 return [(t,n) for r,t,n in l]
487 487
488 488 def nodetags(self, node):
489 489 '''return the tags associated with a node'''
490 490 if not self.nodetagscache:
491 491 self.nodetagscache = {}
492 492 for t,n in self.tags().items():
493 493 self.nodetagscache.setdefault(n,[]).append(t)
494 494 return self.nodetagscache.get(node, [])
495 495
496 496 def lookup(self, key):
497 497 try:
498 498 return self.tags()[key]
499 499 except KeyError:
500 500 return self.changelog.lookup(key)
501 501
502 502 def join(self, f):
503 503 return os.path.join(self.path, f)
504 504
505 505 def wjoin(self, f):
506 506 return os.path.join(self.root, f)
507 507
508 508 def file(self, f):
509 509 if f[0] == '/': f = f[1:]
510 510 return filelog(self.opener, f)
511 511
512 512 def wfile(self, f, mode='r'):
513 513 return self.wopener(f, mode)
514 514
515 515 def transaction(self):
516 516 # save dirstate for undo
517 517 try:
518 518 ds = self.opener("dirstate").read()
519 519 except IOError:
520 520 ds = ""
521 521 self.opener("undo.dirstate", "w").write(ds)
522 522
523 523 return transaction.transaction(self.opener, self.join("journal"),
524 524 self.join("undo"))
525 525
526 526 def recover(self):
527 527 lock = self.lock()
528 528 if os.path.exists(self.join("journal")):
529 529 self.ui.status("rolling back interrupted transaction\n")
530 530 return transaction.rollback(self.opener, self.join("journal"))
531 531 else:
532 532 self.ui.warn("no interrupted transaction available\n")
533 533
534 534 def undo(self):
535 535 lock = self.lock()
536 536 if os.path.exists(self.join("undo")):
537 537 self.ui.status("rolling back last transaction\n")
538 538 transaction.rollback(self.opener, self.join("undo"))
539 539 self.dirstate = None
540 540 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
541 541 self.dirstate = dirstate(self.opener, self.ui, self.root)
542 542 else:
543 543 self.ui.warn("no undo information available\n")
544 544
545 545 def lock(self, wait = 1):
546 546 try:
547 547 return lock.lock(self.join("lock"), 0)
548 548 except lock.LockHeld, inst:
549 549 if wait:
550 550 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
551 551 return lock.lock(self.join("lock"), wait)
552 552 raise inst
553 553
554 554 def rawcommit(self, files, text, user, date, p1=None, p2=None):
555 555 orig_parent = self.dirstate.parents()[0] or nullid
556 556 p1 = p1 or self.dirstate.parents()[0] or nullid
557 557 p2 = p2 or self.dirstate.parents()[1] or nullid
558 558 c1 = self.changelog.read(p1)
559 559 c2 = self.changelog.read(p2)
560 560 m1 = self.manifest.read(c1[0])
561 561 mf1 = self.manifest.readflags(c1[0])
562 562 m2 = self.manifest.read(c2[0])
563 563
564 564 if orig_parent == p1:
565 565 update_dirstate = 1
566 566 else:
567 567 update_dirstate = 0
568 568
569 569 tr = self.transaction()
570 570 mm = m1.copy()
571 571 mfm = mf1.copy()
572 572 linkrev = self.changelog.count()
573 573 for f in files:
574 574 try:
575 575 t = self.wfile(f).read()
576 576 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
577 577 r = self.file(f)
578 578 mfm[f] = tm
579 579 mm[f] = r.add(t, {}, tr, linkrev,
580 580 m1.get(f, nullid), m2.get(f, nullid))
581 581 if update_dirstate:
582 582 self.dirstate.update([f], "n")
583 583 except IOError:
584 584 try:
585 585 del mm[f]
586 586 del mfm[f]
587 587 if update_dirstate:
588 588 self.dirstate.forget([f])
589 589 except:
590 590 # deleted from p2?
591 591 pass
592 592
593 593 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
594 594 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
595 595 tr.close()
596 596 if update_dirstate:
597 597 self.dirstate.setparents(n, nullid)
598 598
599 599 def commit(self, files = None, text = "", user = None, date = None):
600 600 commit = []
601 601 remove = []
602 602 if files:
603 603 for f in files:
604 604 s = self.dirstate.state(f)
605 605 if s in 'nmai':
606 606 commit.append(f)
607 607 elif s == 'r':
608 608 remove.append(f)
609 609 else:
610 610 self.ui.warn("%s not tracked!\n" % f)
611 611 else:
612 612 (c, a, d, u) = self.changes(None, None)
613 613 commit = c + a
614 614 remove = d
615 615
616 616 if not commit and not remove:
617 617 self.ui.status("nothing changed\n")
618 618 return
619 619
620 620 if not self.hook("precommit"):
621 621 return 1
622 622
623 623 p1, p2 = self.dirstate.parents()
624 624 c1 = self.changelog.read(p1)
625 625 c2 = self.changelog.read(p2)
626 626 m1 = self.manifest.read(c1[0])
627 627 mf1 = self.manifest.readflags(c1[0])
628 628 m2 = self.manifest.read(c2[0])
629 629 lock = self.lock()
630 630 tr = self.transaction()
631 631
632 632 # check in files
633 633 new = {}
634 634 linkrev = self.changelog.count()
635 635 commit.sort()
636 636 for f in commit:
637 637 self.ui.note(f + "\n")
638 638 try:
639 639 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
640 640 t = self.wfile(f).read()
641 641 except IOError:
642 642 self.ui.warn("trouble committing %s!\n" % f)
643 643 raise
644 644
645 645 meta = {}
646 646 cp = self.dirstate.copied(f)
647 647 if cp:
648 648 meta["copy"] = cp
649 649 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
650 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
650 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
651 651
652 652 r = self.file(f)
653 653 fp1 = m1.get(f, nullid)
654 654 fp2 = m2.get(f, nullid)
655 655 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
656 656
657 657 # update manifest
658 658 m1.update(new)
659 659 for f in remove:
660 660 if f in m1:
661 661 del m1[f]
662 662 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0])
663 663
664 664 # add changeset
665 665 new = new.keys()
666 666 new.sort()
667 667
668 668 if not text:
669 669 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
670 670 edittext += "".join(["HG: changed %s\n" % f for f in new])
671 671 edittext += "".join(["HG: removed %s\n" % f for f in remove])
672 672 edittext = self.ui.edit(edittext)
673 673 if not edittext.rstrip():
674 674 return 1
675 675 text = edittext
676 676
677 677 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
678 678
679 679 if not self.hook("commit", node=hex(n)):
680 680 return 1
681 681
682 682 tr.close()
683 683
684 684 self.dirstate.setparents(n)
685 685 self.dirstate.update(new, "n")
686 686 self.dirstate.forget(remove)
687 687
688 688 def changes(self, node1, node2, files=None):
689 689 mf2, u = None, []
690 690
691 691 def fcmp(fn, mf):
692 692 t1 = self.wfile(fn).read()
693 693 t2 = self.file(fn).revision(mf[fn])
694 694 return cmp(t1, t2)
695 695
696 696 # are we comparing the working directory?
697 697 if not node2:
698 698 l, c, a, d, u = self.dirstate.changes(files, self.ignore)
699 699
700 700 # are we comparing working dir against its parent?
701 701 if not node1:
702 702 if l:
703 703 # do a full compare of any files that might have changed
704 704 change = self.changelog.read(self.dirstate.parents()[0])
705 705 mf2 = self.manifest.read(change[0])
706 706 for f in l:
707 707 if fcmp(f, mf2):
708 708 c.append(f)
709 709
710 710 for l in c, a, d, u:
711 711 l.sort()
712 712
713 713 return (c, a, d, u)
714 714
715 715 # are we comparing working dir against non-tip?
716 716 # generate a pseudo-manifest for the working dir
717 717 if not node2:
718 718 if not mf2:
719 719 change = self.changelog.read(self.dirstate.parents()[0])
720 720 mf2 = self.manifest.read(change[0]).copy()
721 721 for f in a + c + l:
722 722 mf2[f] = ""
723 723 for f in d:
724 724 if f in mf2: del mf2[f]
725 725 else:
726 726 change = self.changelog.read(node2)
727 727 mf2 = self.manifest.read(change[0])
728 728
729 729 # flush lists from dirstate before comparing manifests
730 730 c, a = [], []
731 731
732 732 change = self.changelog.read(node1)
733 733 mf1 = self.manifest.read(change[0]).copy()
734 734
735 735 for fn in mf2:
736 736 if mf1.has_key(fn):
737 737 if mf1[fn] != mf2[fn]:
738 738 if mf2[fn] != "" or fcmp(fn, mf1):
739 739 c.append(fn)
740 740 del mf1[fn]
741 741 else:
742 742 a.append(fn)
743 743
744 744 d = mf1.keys()
745 745
746 746 for l in c, a, d, u:
747 747 l.sort()
748 748
749 749 return (c, a, d, u)
750 750
751 751 def add(self, list):
752 752 for f in list:
753 753 p = self.wjoin(f)
754 754 if not os.path.isfile(p):
755 755 self.ui.warn("%s does not exist!\n" % f)
756 756 elif self.dirstate.state(f) == 'n':
757 757 self.ui.warn("%s already tracked!\n" % f)
758 758 else:
759 759 self.dirstate.update([f], "a")
760 760
761 761 def forget(self, list):
762 762 for f in list:
763 763 if self.dirstate.state(f) not in 'ai':
764 764 self.ui.warn("%s not added!\n" % f)
765 765 else:
766 766 self.dirstate.forget([f])
767 767
768 768 def remove(self, list):
769 769 for f in list:
770 770 p = self.wjoin(f)
771 771 if os.path.isfile(p):
772 772 self.ui.warn("%s still exists!\n" % f)
773 773 elif self.dirstate.state(f) == 'a':
774 774 self.ui.warn("%s never committed!\n" % f)
775 775 self.dirstate.forget([f])
776 776 elif f not in self.dirstate:
777 777 self.ui.warn("%s not tracked!\n" % f)
778 778 else:
779 779 self.dirstate.update([f], "r")
780 780
781 781 def copy(self, source, dest):
782 782 p = self.wjoin(dest)
783 783 if not os.path.isfile(p):
784 784 self.ui.warn("%s does not exist!\n" % dest)
785 785 else:
786 786 if self.dirstate.state(dest) == '?':
787 787 self.dirstate.update([dest], "a")
788 788 self.dirstate.copy(source, dest)
789 789
790 790 def heads(self):
791 791 return self.changelog.heads()
792 792
793 793 def branches(self, nodes):
794 794 if not nodes: nodes = [self.changelog.tip()]
795 795 b = []
796 796 for n in nodes:
797 797 t = n
798 798 while n:
799 799 p = self.changelog.parents(n)
800 800 if p[1] != nullid or p[0] == nullid:
801 801 b.append((t, n, p[0], p[1]))
802 802 break
803 803 n = p[0]
804 804 return b
805 805
806 806 def between(self, pairs):
807 807 r = []
808 808
809 809 for top, bottom in pairs:
810 810 n, l, i = top, [], 0
811 811 f = 1
812 812
813 813 while n != bottom:
814 814 p = self.changelog.parents(n)[0]
815 815 if i == f:
816 l.append(n)
816 l.append(n)
817 817 f = f * 2
818 818 n = p
819 819 i += 1
820 820
821 821 r.append(l)
822 822
823 823 return r
824 824
825 825 def newer(self, nodes):
826 826 m = {}
827 827 nl = []
828 828 pm = {}
829 829 cl = self.changelog
830 830 t = l = cl.count()
831 831
832 832 # find the lowest numbered node
833 833 for n in nodes:
834 834 l = min(l, cl.rev(n))
835 835 m[n] = 1
836 836
837 837 for i in xrange(l, t):
838 838 n = cl.node(i)
839 839 if n in m: # explicitly listed
840 840 pm[n] = 1
841 841 nl.append(n)
842 842 continue
843 843 for p in cl.parents(n):
844 844 if p in pm: # parent listed
845 845 pm[n] = 1
846 846 nl.append(n)
847 847 break
848 848
849 849 return nl
850 850
851 851 def findincoming(self, remote):
852 852 m = self.changelog.nodemap
853 853 search = []
854 854 fetch = []
855 855 seen = {}
856 856 seenbranch = {}
857 857
858 858 # if we have an empty repo, fetch everything
859 859 if self.changelog.tip() == nullid:
860 860 self.ui.status("requesting all changes\n")
861 861 return [nullid]
862 862
863 863 # otherwise, assume we're closer to the tip than the root
864 864 self.ui.status("searching for changes\n")
865 865 heads = remote.heads()
866 866 unknown = []
867 867 for h in heads:
868 868 if h not in m:
869 869 unknown.append(h)
870 870
871 871 if not unknown:
872 872 return None
873 873
874 874 rep = {}
875 875 reqcnt = 0
876 876
877 877 unknown = remote.branches(unknown)
878 878 while unknown:
879 879 r = []
880 880 while unknown:
881 881 n = unknown.pop(0)
882 882 if n[0] in seen:
883 883 continue
884 884
885 885 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
886 886 if n[0] == nullid:
887 887 break
888 888 if n in seenbranch:
889 889 self.ui.debug("branch already found\n")
890 890 continue
891 891 if n[1] and n[1] in m: # do we know the base?
892 892 self.ui.debug("found incomplete branch %s:%s\n"
893 893 % (short(n[0]), short(n[1])))
894 894 search.append(n) # schedule branch range for scanning
895 895 seenbranch[n] = 1
896 896 else:
897 897 if n[1] not in seen and n[1] not in fetch:
898 898 if n[2] in m and n[3] in m:
899 899 self.ui.debug("found new changeset %s\n" %
900 900 short(n[1]))
901 901 fetch.append(n[1]) # earliest unknown
902 902 continue
903 903
904 904 for a in n[2:4]:
905 905 if a not in rep:
906 906 r.append(a)
907 907 rep[a] = 1
908 908
909 909 seen[n[0]] = 1
910 910
911 911 if r:
912 912 reqcnt += 1
913 913 self.ui.debug("request %d: %s\n" %
914 914 (reqcnt, " ".join(map(short, r))))
915 915 for p in range(0, len(r), 10):
916 916 for b in remote.branches(r[p:p+10]):
917 917 self.ui.debug("received %s:%s\n" %
918 918 (short(b[0]), short(b[1])))
919 919 if b[0] not in m and b[0] not in seen:
920 920 unknown.append(b)
921 921
922 922 while search:
923 923 n = search.pop(0)
924 924 reqcnt += 1
925 925 l = remote.between([(n[0], n[1])])[0]
926 926 l.append(n[1])
927 927 p = n[0]
928 928 f = 1
929 929 for i in l:
930 930 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
931 931 if i in m:
932 932 if f <= 2:
933 933 self.ui.debug("found new branch changeset %s\n" %
934 934 short(p))
935 935 fetch.append(p)
936 936 else:
937 937 self.ui.debug("narrowed branch search to %s:%s\n"
938 938 % (short(p), short(i)))
939 939 search.append((p, i))
940 940 break
941 941 p, f = i, f * 2
942 942
943 943 for f in fetch:
944 944 if f in m:
945 945 raise RepoError("already have changeset " + short(f[:4]))
946 946
947 947 if fetch == [nullid]:
948 948 self.ui.warn("warning: pulling from an unrelated repository!\n")
949 949
950 950 self.ui.note("adding new changesets starting at " +
951 951 " ".join([short(f) for f in fetch]) + "\n")
952 952
953 953 self.ui.debug("%d total queries\n" % reqcnt)
954 954
955 955 return fetch
956 956
957 957 def changegroup(self, basenodes):
958 958 nodes = self.newer(basenodes)
959 959
960 960 # construct the link map
961 961 linkmap = {}
962 962 for n in nodes:
963 963 linkmap[self.changelog.rev(n)] = n
964 964
965 965 # construct a list of all changed files
966 966 changed = {}
967 967 for n in nodes:
968 968 c = self.changelog.read(n)
969 969 for f in c[3]:
970 970 changed[f] = 1
971 971 changed = changed.keys()
972 972 changed.sort()
973 973
974 974 # the changegroup is changesets + manifests + all file revs
975 975 revs = [ self.changelog.rev(n) for n in nodes ]
976 976
977 977 for y in self.changelog.group(linkmap): yield y
978 978 for y in self.manifest.group(linkmap): yield y
979 979 for f in changed:
980 980 yield struct.pack(">l", len(f) + 4) + f
981 981 g = self.file(f).group(linkmap)
982 982 for y in g:
983 983 yield y
984 984
985 985 def addchangegroup(self, generator):
986 986
987 987 class genread:
988 988 def __init__(self, generator):
989 989 self.g = generator
990 990 self.buf = ""
991 991 def read(self, l):
992 992 while l > len(self.buf):
993 993 try:
994 994 self.buf += self.g.next()
995 995 except StopIteration:
996 996 break
997 997 d, self.buf = self.buf[:l], self.buf[l:]
998 998 return d
999 999
1000 1000 def getchunk():
1001 1001 d = source.read(4)
1002 1002 if not d: return ""
1003 1003 l = struct.unpack(">l", d)[0]
1004 1004 if l <= 4: return ""
1005 1005 return source.read(l - 4)
1006 1006
1007 1007 def getgroup():
1008 1008 while 1:
1009 1009 c = getchunk()
1010 1010 if not c: break
1011 1011 yield c
1012 1012
1013 1013 def csmap(x):
1014 1014 self.ui.debug("add changeset %s\n" % short(x))
1015 1015 return self.changelog.count()
1016 1016
1017 1017 def revmap(x):
1018 1018 return self.changelog.rev(x)
1019 1019
1020 1020 if not generator: return
1021 1021 changesets = files = revisions = 0
1022 1022
1023 1023 source = genread(generator)
1024 1024 lock = self.lock()
1025 1025 tr = self.transaction()
1026 1026
1027 1027 # pull off the changeset group
1028 1028 self.ui.status("adding changesets\n")
1029 1029 co = self.changelog.tip()
1030 1030 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1031 1031 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1032 1032
1033 1033 # pull off the manifest group
1034 1034 self.ui.status("adding manifests\n")
1035 1035 mm = self.manifest.tip()
1036 1036 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1037 1037
1038 1038 # process the files
1039 1039 self.ui.status("adding file revisions\n")
1040 1040 while 1:
1041 1041 f = getchunk()
1042 1042 if not f: break
1043 1043 self.ui.debug("adding %s revisions\n" % f)
1044 1044 fl = self.file(f)
1045 1045 o = fl.count()
1046 1046 n = fl.addgroup(getgroup(), revmap, tr)
1047 1047 revisions += fl.count() - o
1048 1048 files += 1
1049 1049
1050 1050 self.ui.status(("modified %d files, added %d changesets" +
1051 1051 " and %d new revisions\n")
1052 1052 % (files, changesets, revisions))
1053 1053
1054 1054 tr.close()
1055 1055 return
1056 1056
1057 1057 def update(self, node, allow=False, force=False):
1058 1058 pl = self.dirstate.parents()
1059 1059 if not force and pl[1] != nullid:
1060 1060 self.ui.warn("aborting: outstanding uncommitted merges\n")
1061 1061 return
1062 1062
1063 1063 p1, p2 = pl[0], node
1064 1064 pa = self.changelog.ancestor(p1, p2)
1065 1065 m1n = self.changelog.read(p1)[0]
1066 1066 m2n = self.changelog.read(p2)[0]
1067 1067 man = self.manifest.ancestor(m1n, m2n)
1068 1068 m1 = self.manifest.read(m1n)
1069 1069 mf1 = self.manifest.readflags(m1n)
1070 1070 m2 = self.manifest.read(m2n)
1071 1071 mf2 = self.manifest.readflags(m2n)
1072 1072 ma = self.manifest.read(man)
1073 1073 mfa = self.manifest.readflags(man)
1074 1074
1075 1075 (c, a, d, u) = self.changes(None, None)
1076 1076
1077 1077 # is this a jump, or a merge? i.e. is there a linear path
1078 1078 # from p1 to p2?
1079 1079 linear_path = (pa == p1 or pa == p2)
1080 1080
1081 1081 # resolve the manifest to determine which files
1082 1082 # we care about merging
1083 1083 self.ui.note("resolving manifests\n")
1084 1084 self.ui.debug(" ancestor %s local %s remote %s\n" %
1085 1085 (short(man), short(m1n), short(m2n)))
1086 1086
1087 1087 merge = {}
1088 1088 get = {}
1089 1089 remove = []
1090 1090 mark = {}
1091 1091
1092 1092 # construct a working dir manifest
1093 1093 mw = m1.copy()
1094 1094 mfw = mf1.copy()
1095 1095 for f in a + c + u:
1096 1096 mw[f] = ""
1097 1097 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1098 1098 for f in d:
1099 1099 if f in mw: del mw[f]
1100 1100
1101 1101 # If we're jumping between revisions (as opposed to merging),
1102 1102 # and if neither the working directory nor the target rev has
1103 1103 # the file, then we need to remove it from the dirstate, to
1104 1104 # prevent the dirstate from listing the file when it is no
1105 1105 # longer in the manifest.
1106 1106 if linear_path and f not in m2:
1107 1107 self.dirstate.forget((f,))
1108 1108
1109 1109 for f, n in mw.iteritems():
1110 1110 if f in m2:
1111 1111 s = 0
1112 1112
1113 1113 # is the wfile new since m1, and match m2?
1114 1114 if f not in m1:
1115 1115 t1 = self.wfile(f).read()
1116 1116 t2 = self.file(f).revision(m2[f])
1117 1117 if cmp(t1, t2) == 0:
1118 1118 mark[f] = 1
1119 1119 n = m2[f]
1120 1120 del t1, t2
1121 1121
1122 1122 # are files different?
1123 1123 if n != m2[f]:
1124 1124 a = ma.get(f, nullid)
1125 1125 # are both different from the ancestor?
1126 1126 if n != a and m2[f] != a:
1127 1127 self.ui.debug(" %s versions differ, resolve\n" % f)
1128 1128 # merge executable bits
1129 1129 # "if we changed or they changed, change in merge"
1130 1130 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1131 1131 mode = ((a^b) | (a^c)) ^ a
1132 1132 merge[f] = (m1.get(f, nullid), m2[f], mode)
1133 1133 s = 1
1134 1134 # are we clobbering?
1135 1135 # is remote's version newer?
1136 1136 # or are we going back in time?
1137 1137 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1138 1138 self.ui.debug(" remote %s is newer, get\n" % f)
1139 1139 get[f] = m2[f]
1140 1140 s = 1
1141 1141 else:
1142 1142 mark[f] = 1
1143 1143
1144 1144 if not s and mfw[f] != mf2[f]:
1145 1145 if force:
1146 1146 self.ui.debug(" updating permissions for %s\n" % f)
1147 1147 util.set_exec(self.wjoin(f), mf2[f])
1148 1148 else:
1149 1149 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1150 1150 mode = ((a^b) | (a^c)) ^ a
1151 1151 if mode != b:
1152 1152 self.ui.debug(" updating permissions for %s\n" % f)
1153 1153 util.set_exec(self.wjoin(f), mode)
1154 1154 mark[f] = 1
1155 1155 del m2[f]
1156 1156 elif f in ma:
1157 1157 if not force and n != ma[f]:
1158 1158 r = ""
1159 1159 if linear_path or allow:
1160 1160 r = self.ui.prompt(
1161 1161 (" local changed %s which remote deleted\n" % f) +
1162 1162 "(k)eep or (d)elete?", "[kd]", "k")
1163 1163 if r == "d":
1164 1164 remove.append(f)
1165 1165 else:
1166 1166 self.ui.debug("other deleted %s\n" % f)
1167 1167 remove.append(f) # other deleted it
1168 1168 else:
1169 1169 if n == m1.get(f, nullid): # same as parent
1170 1170 if p2 == pa: # going backwards?
1171 1171 self.ui.debug("remote deleted %s\n" % f)
1172 1172 remove.append(f)
1173 1173 else:
1174 1174 self.ui.debug("local created %s, keeping\n" % f)
1175 1175 else:
1176 1176 self.ui.debug("working dir created %s, keeping\n" % f)
1177 1177
1178 1178 for f, n in m2.iteritems():
1179 1179 if f[0] == "/": continue
1180 1180 if not force and f in ma and n != ma[f]:
1181 1181 r = ""
1182 1182 if linear_path or allow:
1183 1183 r = self.ui.prompt(
1184 1184 ("remote changed %s which local deleted\n" % f) +
1185 1185 "(k)eep or (d)elete?", "[kd]", "k")
1186 1186 if r == "d": remove.append(f)
1187 1187 else:
1188 1188 self.ui.debug("remote created %s\n" % f)
1189 1189 get[f] = n
1190 1190
1191 1191 del mw, m1, m2, ma
1192 1192
1193 1193 if force:
1194 1194 for f in merge:
1195 1195 get[f] = merge[f][1]
1196 1196 merge = {}
1197 1197
1198 1198 if linear_path:
1199 1199 # we don't need to do any magic, just jump to the new rev
1200 1200 mode = 'n'
1201 1201 p1, p2 = p2, nullid
1202 1202 else:
1203 1203 if not allow:
1204 1204 self.ui.status("this update spans a branch" +
1205 1205 " affecting the following files:\n")
1206 1206 fl = merge.keys() + get.keys()
1207 1207 fl.sort()
1208 1208 for f in fl:
1209 1209 cf = ""
1210 1210 if f in merge: cf = " (resolve)"
1211 1211 self.ui.status(" %s%s\n" % (f, cf))
1212 1212 self.ui.warn("aborting update spanning branches!\n")
1213 1213 self.ui.status("(use update -m to perform a branch merge)\n")
1214 1214 return 1
1215 1215 # we have to remember what files we needed to get/change
1216 1216 # because any file that's different from either one of its
1217 1217 # parents must be in the changeset
1218 1218 mode = 'm'
1219 1219 self.dirstate.update(mark.keys(), "m")
1220 1220
1221 1221 self.dirstate.setparents(p1, p2)
1222 1222
1223 1223 # get the files we don't need to change
1224 1224 files = get.keys()
1225 1225 files.sort()
1226 1226 for f in files:
1227 1227 if f[0] == "/": continue
1228 1228 self.ui.note("getting %s\n" % f)
1229 1229 t = self.file(f).read(get[f])
1230 1230 try:
1231 1231 self.wfile(f, "w").write(t)
1232 1232 except IOError:
1233 1233 os.makedirs(os.path.dirname(self.wjoin(f)))
1234 1234 self.wfile(f, "w").write(t)
1235 1235 util.set_exec(self.wjoin(f), mf2[f])
1236 1236 self.dirstate.update([f], mode)
1237 1237
1238 1238 # merge the tricky bits
1239 1239 files = merge.keys()
1240 1240 files.sort()
1241 1241 for f in files:
1242 1242 self.ui.status("merging %s\n" % f)
1243 1243 m, o, flag = merge[f]
1244 1244 self.merge3(f, m, o)
1245 1245 util.set_exec(self.wjoin(f), flag)
1246 1246 self.dirstate.update([f], 'm')
1247 1247
1248 1248 for f in remove:
1249 1249 self.ui.note("removing %s\n" % f)
1250 1250 os.unlink(f)
1251 1251 if mode == 'n':
1252 1252 self.dirstate.forget(remove)
1253 1253 else:
1254 1254 self.dirstate.update(remove, 'r')
1255 1255
1256 1256 def merge3(self, fn, my, other):
1257 1257 """perform a 3-way merge in the working directory"""
1258 1258
1259 1259 def temp(prefix, node):
1260 1260 pre = "%s~%s." % (os.path.basename(fn), prefix)
1261 1261 (fd, name) = tempfile.mkstemp("", pre)
1262 1262 f = os.fdopen(fd, "wb")
1263 1263 f.write(fl.revision(node))
1264 1264 f.close()
1265 1265 return name
1266 1266
1267 1267 fl = self.file(fn)
1268 1268 base = fl.ancestor(my, other)
1269 1269 a = self.wjoin(fn)
1270 1270 b = temp("base", base)
1271 1271 c = temp("other", other)
1272 1272
1273 1273 self.ui.note("resolving %s\n" % fn)
1274 1274 self.ui.debug("file %s: other %s ancestor %s\n" %
1275 1275 (fn, short(other), short(base)))
1276 1276
1277 1277 cmd = os.environ.get("HGMERGE", "hgmerge")
1278 1278 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1279 1279 if r:
1280 1280 self.ui.warn("merging %s failed!\n" % fn)
1281 1281
1282 1282 os.unlink(b)
1283 1283 os.unlink(c)
1284 1284
1285 1285 def verify(self):
1286 1286 filelinkrevs = {}
1287 1287 filenodes = {}
1288 1288 changesets = revisions = files = 0
1289 1289 errors = 0
1290 1290
1291 1291 seen = {}
1292 1292 self.ui.status("checking changesets\n")
1293 1293 for i in range(self.changelog.count()):
1294 1294 changesets += 1
1295 1295 n = self.changelog.node(i)
1296 1296 if n in seen:
1297 1297 self.ui.warn("duplicate changeset at revision %d\n" % i)
1298 1298 errors += 1
1299 1299 seen[n] = 1
1300 1300
1301 1301 for p in self.changelog.parents(n):
1302 1302 if p not in self.changelog.nodemap:
1303 1303 self.ui.warn("changeset %s has unknown parent %s\n" %
1304 1304 (short(n), short(p)))
1305 1305 errors += 1
1306 1306 try:
1307 1307 changes = self.changelog.read(n)
1308 1308 except Exception, inst:
1309 1309 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1310 1310 errors += 1
1311 1311
1312 1312 for f in changes[3]:
1313 1313 filelinkrevs.setdefault(f, []).append(i)
1314 1314
1315 1315 seen = {}
1316 1316 self.ui.status("checking manifests\n")
1317 1317 for i in range(self.manifest.count()):
1318 1318 n = self.manifest.node(i)
1319 1319 if n in seen:
1320 1320 self.ui.warn("duplicate manifest at revision %d\n" % i)
1321 1321 errors += 1
1322 1322 seen[n] = 1
1323 1323
1324 1324 for p in self.manifest.parents(n):
1325 1325 if p not in self.manifest.nodemap:
1326 1326 self.ui.warn("manifest %s has unknown parent %s\n" %
1327 1327 (short(n), short(p)))
1328 1328 errors += 1
1329 1329
1330 1330 try:
1331 1331 delta = mdiff.patchtext(self.manifest.delta(n))
1332 1332 except KeyboardInterrupt:
1333 1333 print "aborted"
1334 1334 sys.exit(0)
1335 1335 except Exception, inst:
1336 1336 self.ui.warn("unpacking manifest %s: %s\n"
1337 1337 % (short(n), inst))
1338 1338 errors += 1
1339 1339
1340 1340 ff = [ l.split('\0') for l in delta.splitlines() ]
1341 1341 for f, fn in ff:
1342 1342 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1343 1343
1344 1344 self.ui.status("crosschecking files in changesets and manifests\n")
1345 1345 for f in filenodes:
1346 1346 if f not in filelinkrevs:
1347 1347 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1348 1348 errors += 1
1349 1349
1350 1350 for f in filelinkrevs:
1351 1351 if f not in filenodes:
1352 1352 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1353 1353 errors += 1
1354 1354
1355 1355 self.ui.status("checking files\n")
1356 1356 ff = filenodes.keys()
1357 1357 ff.sort()
1358 1358 for f in ff:
1359 1359 if f == "/dev/null": continue
1360 1360 files += 1
1361 1361 fl = self.file(f)
1362 1362 nodes = { nullid: 1 }
1363 1363 seen = {}
1364 1364 for i in range(fl.count()):
1365 1365 revisions += 1
1366 1366 n = fl.node(i)
1367 1367
1368 1368 if n in seen:
1369 1369 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1370 1370 errors += 1
1371 1371
1372 1372 if n not in filenodes[f]:
1373 1373 self.ui.warn("%s: %d:%s not in manifests\n"
1374 1374 % (f, i, short(n)))
1375 1375 print len(filenodes[f].keys()), fl.count(), f
1376 1376 errors += 1
1377 1377 else:
1378 1378 del filenodes[f][n]
1379 1379
1380 1380 flr = fl.linkrev(n)
1381 1381 if flr not in filelinkrevs[f]:
1382 1382 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1383 1383 % (f, short(n), fl.linkrev(n)))
1384 1384 errors += 1
1385 1385 else:
1386 1386 filelinkrevs[f].remove(flr)
1387 1387
1388 1388 # verify contents
1389 1389 try:
1390 1390 t = fl.read(n)
1391 1391 except Exception, inst:
1392 1392 self.ui.warn("unpacking file %s %s: %s\n"
1393 1393 % (f, short(n), inst))
1394 1394 errors += 1
1395 1395
1396 1396 # verify parents
1397 1397 (p1, p2) = fl.parents(n)
1398 1398 if p1 not in nodes:
 1399 1399                 self.ui.warn("file %s:%s unknown parent 1 %s\n" %
1400 1400 (f, short(n), short(p1)))
1401 1401 errors += 1
1402 1402 if p2 not in nodes:
 1403 1403                 self.ui.warn("file %s:%s unknown parent 2 %s\n" %
 1404 1404                                  (f, short(n), short(p2)))
1405 1405 errors += 1
1406 1406 nodes[n] = 1
1407 1407
1408 1408 # cross-check
1409 1409 for node in filenodes[f]:
1410 1410 self.ui.warn("node %s in manifests not in %s\n"
 1411 1411                               % (hex(node), f))
1412 1412 errors += 1
1413 1413
1414 1414 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1415 1415 (files, changesets, revisions))
1416 1416
1417 1417 if errors:
1418 1418 self.ui.warn("%d integrity errors encountered!\n" % errors)
1419 1419 return 1
1420 1420
1421 1421 class remoterepository:
1422 1422 def __init__(self, ui, path):
1423 1423 self.url = path
1424 1424 self.ui = ui
1425 1425 no_list = [ "localhost", "127.0.0.1" ]
1426 1426 host = ui.config("http_proxy", "host")
1427 1427 if host is None:
1428 1428 host = os.environ.get("http_proxy")
1429 1429 if host and host.startswith('http://'):
1430 1430 host = host[7:]
1431 1431 user = ui.config("http_proxy", "user")
1432 1432 passwd = ui.config("http_proxy", "passwd")
1433 1433 no = ui.config("http_proxy", "no")
1434 1434 if no is None:
1435 1435 no = os.environ.get("no_proxy")
1436 1436 if no:
1437 1437 no_list = no_list + no.split(",")
1438 1438
1439 1439 no_proxy = 0
1440 1440 for h in no_list:
1441 1441 if (path.startswith("http://" + h + "/") or
1442 1442 path.startswith("http://" + h + ":") or
1443 1443 path == "http://" + h):
1444 1444 no_proxy = 1
1445 1445
1446 1446 # Note: urllib2 takes proxy values from the environment and those will
1447 1447 # take precedence
1448 1448 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1449 1449 if os.environ.has_key(env):
1450 1450 del os.environ[env]
1451 1451
1452 1452 proxy_handler = urllib2.BaseHandler()
1453 1453 if host and not no_proxy:
1454 1454 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1455 1455
1456 1456 authinfo = None
1457 1457 if user and passwd:
1458 1458 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1459 1459 passmgr.add_password(None, host, user, passwd)
1460 1460 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
1461 1461
1462 1462 opener = urllib2.build_opener(proxy_handler, authinfo)
1463 1463 urllib2.install_opener(opener)
1464 1464
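The constructor above wires an HTTP proxy into urllib2: a ProxyHandler pointing at the configured host, plus a ProxyBasicAuthHandler when credentials are supplied, with the resulting opener installed globally. A condensed sketch of that wiring is below (Python 2, since urllib2 is the 2.x module); the proxy host and credentials in the usage comment are made up.

    import urllib2

    def build_proxy_opener(host, user=None, passwd=None):
        # route plain http requests through the proxy
        handlers = [urllib2.ProxyHandler({"http": "http://" + host})]
        if user and passwd:
            # answer 407 challenges from the proxy with basic auth
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, host, user, passwd)
            handlers.append(urllib2.ProxyBasicAuthHandler(passmgr))
        return urllib2.build_opener(*handlers)

    # example (hypothetical proxy):
    # urllib2.install_opener(build_proxy_opener("proxy.example.com:3128", "user", "secret"))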
1465 1465 def do_cmd(self, cmd, **args):
1466 1466 self.ui.debug("sending %s command\n" % cmd)
1467 1467 q = {"cmd": cmd}
1468 1468 q.update(args)
1469 1469 qs = urllib.urlencode(q)
1470 1470 cu = "%s?%s" % (self.url, qs)
1471 1471 return urllib2.urlopen(cu)
1472 1472
1473 1473 def heads(self):
1474 1474 d = self.do_cmd("heads").read()
1475 1475 try:
1476 1476 return map(bin, d[:-1].split(" "))
1477 1477 except:
1478 1478 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1479 1479 raise
1480 1480
1481 1481 def branches(self, nodes):
1482 1482 n = " ".join(map(hex, nodes))
1483 1483 d = self.do_cmd("branches", nodes=n).read()
1484 1484 try:
1485 1485 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1486 1486 return br
1487 1487 except:
1488 1488 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1489 1489 raise
1490 1490
1491 1491 def between(self, pairs):
1492 1492 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1493 1493 d = self.do_cmd("between", pairs=n).read()
1494 1494 try:
1495 1495 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1496 1496 return p
1497 1497 except:
1498 1498 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1499 1499 raise
1500 1500
1501 1501 def changegroup(self, nodes):
1502 1502 n = " ".join(map(hex, nodes))
1503 1503 zd = zlib.decompressobj()
1504 1504 f = self.do_cmd("changegroup", roots=n)
1505 1505 bytes = 0
1506 1506 while 1:
1507 1507 d = f.read(4096)
1508 1508 bytes += len(d)
1509 1509 if not d:
1510 1510 yield zd.flush()
1511 1511 break
1512 1512 yield zd.decompress(d)
 1513 1513         self.ui.note("%d bytes of data transferred\n" % bytes)
1514 1514
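changegroup above streams the compressed HTTP response in 4k chunks through a zlib.decompressobj, yielding decompressed data as it arrives and flushing once the stream ends. The same incremental pattern is shown below against an in-memory buffer rather than a network response; this is a sketch for illustration, not repository code.

    import zlib
    from cStringIO import StringIO

    def decompress_stream(f, blocksize=4096):
        zd = zlib.decompressobj()
        while 1:
            d = f.read(blocksize)
            if not d:
                yield zd.flush()   # emit whatever the decompressor still holds
                break
            yield zd.decompress(d)

    # example: round-trip some bytes through compress/decompress
    payload = "some changegroup-like data " * 100
    stream = StringIO(zlib.compress(payload))
    assert "".join(decompress_stream(stream)) == payload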
1515 1515 def repository(ui, path=None, create=0):
1516 1516 if path and path[:7] == "http://":
1517 1517 return remoterepository(ui, path)
1518 1518 if path and path[:5] == "hg://":
1519 1519 return remoterepository(ui, path.replace("hg://", "http://"))
1520 1520 if path and path[:11] == "old-http://":
1521 1521 return localrepository(ui, path.replace("old-http://", "http://"))
1522 1522 else:
1523 1523 return localrepository(ui, path, create)
1524 1524
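The repository() factory at the end of the module chooses a repository class from the URL prefix: http:// and hg:// paths become a remoterepository, old-http:// is rewritten to http:// and served through localrepository, and anything else is treated as a local path. A small sketch of that dispatch logic, using a hypothetical classify() helper rather than the real classes:

    def classify(path):
        """Mirror the scheme dispatch in repository() above (sketch only)."""
        if path.startswith("http://") or path.startswith("hg://"):
            return "remote"
        if path.startswith("old-http://"):
            return "local-over-http"
        return "local"

    assert classify("hg://example.com/repo") == "remote"
    assert classify("/home/user/src/hg") == "local"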
@@ -1,787 +1,787
1 1 # hgweb.py - web interface to a mercurial repository
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
 9 9 import os, cgi, time, re, difflib, sys, zlib, socket
10 10 from mercurial.hg import *
11 11 from mercurial.ui import *
12 12
13 13 def templatepath():
14 14 for f in "templates", "../templates":
15 15 p = os.path.join(os.path.dirname(__file__), f)
16 16 if os.path.isdir(p): return p
17 17
18 18 def age(t):
19 19 def plural(t, c):
20 20 if c == 1: return t
21 21 return t + "s"
22 22 def fmt(t, c):
23 23 return "%d %s" % (c, plural(t, c))
24 24
25 25 now = time.time()
26 26 delta = max(1, int(now - t))
27 27
28 28 scales = [["second", 1],
29 29 ["minute", 60],
30 30 ["hour", 3600],
31 31 ["day", 3600 * 24],
32 32 ["week", 3600 * 24 * 7],
33 33 ["month", 3600 * 24 * 30],
34 34 ["year", 3600 * 24 * 365]]
35 35
36 36 scales.reverse()
37 37
38 38 for t, s in scales:
39 39 n = delta / s
40 40 if n >= 2 or s == 1: return fmt(t, n)
41 41
42 42 def nl2br(text):
43 43 return text.replace('\n', '<br/>\n')
44 44
45 45 def obfuscate(text):
46 46 return ''.join([ '&#%d;' % ord(c) for c in text ])
47 47
48 48 def up(p):
49 49 if p[0] != "/": p = "/" + p
50 50 if p[-1] == "/": p = p[:-1]
51 51 up = os.path.dirname(p)
52 52 if up == "/":
53 53 return "/"
54 54 return up + "/"
55 55
56 56 def httphdr(type):
57 57 print 'Content-type: %s\n' % type
58 58
59 59 def write(*things):
60 60 for thing in things:
61 61 if hasattr(thing, "__iter__"):
62 62 for part in thing:
63 63 write(part)
64 64 else:
65 65 sys.stdout.write(str(thing))
66 66
67 67 def template(tmpl, filters = {}, **map):
68 68 while tmpl:
69 69 m = re.search(r"#([a-zA-Z0-9]+)((\|[a-zA-Z0-9]+)*)#", tmpl)
70 70 if m:
71 71 yield tmpl[:m.start(0)]
72 72 v = map.get(m.group(1), "")
73 73 v = callable(v) and v() or v
74 74
75 75 fl = m.group(2)
76 76 if fl:
77 77 for f in fl.split("|")[1:]:
78 78 v = filters[f](v)
79 79
80 80 yield v
81 81 tmpl = tmpl[m.end(0):]
82 82 else:
83 83 yield tmpl
84 84 return
85 85
86 86 class templater:
87 87 def __init__(self, mapfile, filters = {}):
88 88 self.cache = {}
89 89 self.map = {}
90 90 self.base = os.path.dirname(mapfile)
91 91 self.filters = filters
92 92
93 93 for l in file(mapfile):
94 94 m = re.match(r'(\S+)\s*=\s*"(.*)"$', l)
95 95 if m:
96 96 self.cache[m.group(1)] = m.group(2)
97 97 else:
98 98 m = re.match(r'(\S+)\s*=\s*(\S+)', l)
99 99 if m:
100 100 self.map[m.group(1)] = os.path.join(self.base, m.group(2))
101 101 else:
102 102 raise "unknown map entry '%s'" % l
103 103
104 104 def __call__(self, t, **map):
105 105 try:
106 106 tmpl = self.cache[t]
107 107 except KeyError:
108 108 tmpl = self.cache[t] = file(self.map[t]).read()
109 109 return template(tmpl, self.filters, **map)
110 110
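template() above scans a string for #name# or #name|filter1|filter2# markers, looks each name up in the keyword map (calling it if it is callable), runs the value through the named filters, and yields the interleaved literal text and substituted values; templater then caches template bodies loaded from the map file. A small illustration, assuming this module's template() generator is in scope; the template string and filters here are ad hoc, not the shipped template set.

    tmpl = "changeset #node|short# by #author|obfuscate#\n"
    filters = {
        "short": lambda x: x[:12],
        "obfuscate": lambda x: "".join(["&#%d;" % ord(c) for c in x]),
    }
    out = "".join([str(part) for part in
                   template(tmpl, filters,
                            node="a" * 40, author="user@example.com")])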
111 111 class hgweb:
112 112 maxchanges = 10
113 113 maxfiles = 10
114 114
115 115 def __init__(self, path, name, templates = ""):
116 116 self.templates = templates or templatepath()
117 117 self.reponame = name
118 118 self.path = path
119 119 self.mtime = -1
120 120 self.viewonly = 0
121 121
122 122 self.filters = {
123 123 "escape": cgi.escape,
124 124 "age": age,
125 125 "date": (lambda x: time.asctime(time.gmtime(x))),
126 126 "addbreaks": nl2br,
127 127 "obfuscate": obfuscate,
128 128 "short": (lambda x: x[:12]),
129 129 "firstline": (lambda x: x.splitlines(1)[0]),
130 130 "permissions": (lambda x: x and "-rwxr-xr-x" or "-rw-r--r--")
131 131 }
132 132
133 133 def refresh(self):
134 134 s = os.stat(os.path.join(self.path, ".hg", "00changelog.i"))
135 135 if s.st_mtime != self.mtime:
136 136 self.mtime = s.st_mtime
137 137 self.repo = repository(ui(), self.path)
138 138
139 139 def date(self, cs):
140 140 return time.asctime(time.gmtime(float(cs[2].split(' ')[0])))
141 141
142 142 def listfiles(self, files, mf):
143 143 for f in files[:self.maxfiles]:
144 144 yield self.t("filenodelink", node = hex(mf[f]), file = f)
145 145 if len(files) > self.maxfiles:
146 146 yield self.t("fileellipses")
147 147
148 148 def listfilediffs(self, files, changeset):
149 149 for f in files[:self.maxfiles]:
150 150 yield self.t("filedifflink", node = hex(changeset), file = f)
151 151 if len(files) > self.maxfiles:
152 152 yield self.t("fileellipses")
153 153
154 154 def parents(self, t1, nodes=[], rev=None,**args):
155 155 if not rev: rev = lambda x: ""
156 156 for node in nodes:
157 157 if node != nullid:
158 158 yield self.t(t1, node = hex(node), rev = rev(node), **args)
159 159
160 160 def showtag(self, t1, node=nullid, **args):
161 161 for t in self.repo.nodetags(node):
162 162 yield self.t(t1, tag = t, **args)
163 163
164 164 def diff(self, node1, node2, files):
165 165 def filterfiles(list, files):
166 166 l = [ x for x in list if x in files ]
167 167
168 168 for f in files:
169 169 if f[-1] != os.sep: f += os.sep
170 170 l += [ x for x in list if x.startswith(f) ]
171 171 return l
172 172
173 173 parity = [0]
174 174 def diffblock(diff, f, fn):
175 175 yield self.t("diffblock",
176 176 lines = prettyprintlines(diff),
177 177 parity = parity[0],
178 178 file = f,
179 179 filenode = hex(fn or nullid))
180 180 parity[0] = 1 - parity[0]
181 181
182 182 def prettyprintlines(diff):
183 183 for l in diff.splitlines(1):
184 184 if l.startswith('+'):
185 185 yield self.t("difflineplus", line = l)
186 186 elif l.startswith('-'):
187 187 yield self.t("difflineminus", line = l)
188 188 elif l.startswith('@'):
189 189 yield self.t("difflineat", line = l)
190 190 else:
191 191 yield self.t("diffline", line = l)
192 192
193 193 r = self.repo
194 194 cl = r.changelog
195 195 mf = r.manifest
196 196 change1 = cl.read(node1)
197 197 change2 = cl.read(node2)
198 198 mmap1 = mf.read(change1[0])
199 199 mmap2 = mf.read(change2[0])
200 200 date1 = self.date(change1)
201 201 date2 = self.date(change2)
202 202
203 203 c, a, d, u = r.changes(node1, node2)
204 204 c, a, d = map(lambda x: filterfiles(x, files), (c, a, d))
205 205
206 206 for f in c:
207 207 to = r.file(f).read(mmap1[f])
208 208 tn = r.file(f).read(mmap2[f])
209 209 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
210 210 for f in a:
211 211 to = None
212 212 tn = r.file(f).read(mmap2[f])
213 213 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
214 214 for f in d:
215 215 to = r.file(f).read(mmap1[f])
216 216 tn = None
217 217 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
218 218
219 219 def header(self):
220 220 yield self.t("header", repo = self.reponame)
221 221
222 222 def footer(self):
223 223 yield self.t("footer", repo = self.reponame)
224 224
225 225 def changelog(self, pos):
226 226 def changenav():
227 227 def seq(factor = 1):
228 228 yield 1 * factor
229 229 yield 3 * factor
230 230 #yield 5 * factor
231 231 for f in seq(factor * 10):
232 232 yield f
233 233
234 234 l = []
235 235 for f in seq():
236 236 if f < self.maxchanges / 2: continue
237 237 if f > count: break
238 238 r = "%d" % f
239 239 if pos + f < count: l.append(("+" + r, pos + f))
240 240 if pos - f >= 0: l.insert(0, ("-" + r, pos - f))
241 241
242 242 yield self.t("naventry", rev = 0, label="(0)")
243 243
244 244 for label, rev in l:
245 245 yield self.t("naventry", label = label, rev = rev)
246 246
247 247 yield self.t("naventry", label="tip")
248 248
249 249 def changelist():
250 250 parity = (start - end) & 1
251 251 cl = self.repo.changelog
252 252 l = [] # build a list in forward order for efficiency
253 253 for i in range(start, end):
254 254 n = cl.node(i)
255 255 changes = cl.read(n)
256 256 hn = hex(n)
257 257 p1, p2 = cl.parents(n)
258 258 t = float(changes[2].split(' ')[0])
259 259
260 260 l.insert(0, self.t(
261 261 'changelogentry',
262 262 parity = parity,
263 263 author = changes[1],
264 264 parent = self.parents("changelogparent",
265 265 cl.parents(n), cl.rev),
266 266 changelogtag = self.showtag("changelogtag",n),
267 267 p1 = hex(p1), p2 = hex(p2),
268 268 p1rev = cl.rev(p1), p2rev = cl.rev(p2),
269 269 manifest = hex(changes[0]),
270 270 desc = changes[4],
271 271 date = t,
272 272 files = self.listfilediffs(changes[3], n),
273 273 rev = i,
274 274 node = hn))
275 275 parity = 1 - parity
276 276
277 277 yield l
278 278
279 279 cl = self.repo.changelog
280 280 mf = cl.read(cl.tip())[0]
281 281 count = cl.count()
282 282 start = max(0, pos - self.maxchanges + 1)
283 283 end = min(count, start + self.maxchanges)
284 284 pos = end - 1
285 285
286 286 yield self.t('changelog',
287 287 header = self.header(),
288 288 footer = self.footer(),
289 289 repo = self.reponame,
290 290 changenav = changenav,
291 291 manifest = hex(mf),
292 292 rev = pos, changesets = count, entries = changelist)
293 293
294 294 def search(self, query):
295 295
296 296 def changelist():
297 297 cl = self.repo.changelog
298 298 count = 0
299 299 qw = query.lower().split()
300 300
301 301 def revgen():
302 302 for i in range(cl.count() - 1, 0, -100):
303 303 l = []
304 304 for j in range(max(0, i - 100), i):
305 305 n = cl.node(j)
306 306 changes = cl.read(n)
307 307 l.insert(0, (n, j, changes))
308 308 for e in l:
309 309 yield e
310 310
311 311 for n, i, changes in revgen():
312 312 miss = 0
313 313 for q in qw:
314 314 if not (q in changes[1].lower() or
315 315 q in changes[4].lower() or
316 316 q in " ".join(changes[3][:20]).lower()):
317 317 miss = 1
318 318 break
319 319 if miss: continue
320 320
321 321 count += 1
322 322 hn = hex(n)
323 323 p1, p2 = cl.parents(n)
324 324 t = float(changes[2].split(' ')[0])
325 325
326 326 yield self.t(
327 327 'searchentry',
328 328 parity = count & 1,
329 329 author = changes[1],
330 330 parent = self.parents("changelogparent",
331 331 cl.parents(n), cl.rev),
332 332 changelogtag = self.showtag("changelogtag",n),
333 333 p1 = hex(p1), p2 = hex(p2),
334 334 p1rev = cl.rev(p1), p2rev = cl.rev(p2),
335 335 manifest = hex(changes[0]),
336 336 desc = changes[4],
337 337 date = t,
338 338 files = self.listfilediffs(changes[3], n),
339 339 rev = i,
340 340 node = hn)
341 341
342 342 if count >= self.maxchanges: break
343 343
344 344 cl = self.repo.changelog
345 345 mf = cl.read(cl.tip())[0]
346 346
347 347 yield self.t('search',
348 348 header = self.header(),
349 349 footer = self.footer(),
350 350 query = query,
351 351 repo = self.reponame,
352 352 manifest = hex(mf),
353 353 entries = changelist)
354 354
355 355 def changeset(self, nodeid):
356 356 n = bin(nodeid)
357 357 cl = self.repo.changelog
358 358 changes = cl.read(n)
359 359 p1, p2 = cl.parents(n)
360 360 p1rev, p2rev = cl.rev(p1), cl.rev(p2)
361 361 t = float(changes[2].split(' ')[0])
362 362
363 363 files = []
364 364 mf = self.repo.manifest.read(changes[0])
365 365 for f in changes[3]:
366 366 files.append(self.t("filenodelink",
367 367 filenode = hex(mf.get(f, nullid)), file = f))
368 368
369 369 def diff():
370 370 yield self.diff(p1, n, changes[3])
371 371
372 372 yield self.t('changeset',
373 373 header = self.header(),
374 374 footer = self.footer(),
375 375 repo = self.reponame,
376 376 diff = diff,
377 377 rev = cl.rev(n),
378 378 node = nodeid,
379 379 parent = self.parents("changesetparent",
380 380 cl.parents(n), cl.rev),
381 381 changesettag = self.showtag("changesettag",n),
382 382 p1 = hex(p1), p2 = hex(p2),
383 383 p1rev = cl.rev(p1), p2rev = cl.rev(p2),
384 384 manifest = hex(changes[0]),
385 385 author = changes[1],
386 386 desc = changes[4],
387 387 date = t,
388 388 files = files)
389 389
390 390 def filelog(self, f, filenode):
391 391 cl = self.repo.changelog
392 392 fl = self.repo.file(f)
393 393 count = fl.count()
394 394
395 395 def entries():
396 396 l = []
397 397 parity = (count - 1) & 1
398 398
399 399 for i in range(count):
400 400
401 401 n = fl.node(i)
402 402 lr = fl.linkrev(n)
403 403 cn = cl.node(lr)
404 404 cs = cl.read(cl.node(lr))
405 405 p1, p2 = fl.parents(n)
406 406 t = float(cs[2].split(' ')[0])
407 407
408 408 l.insert(0, self.t("filelogentry",
409 409 parity = parity,
410 410 filenode = hex(n),
411 411 filerev = i,
412 412 file = f,
413 413 node = hex(cn),
414 414 author = cs[1],
415 415 date = t,
416 416 desc = cs[4],
417 417 p1 = hex(p1), p2 = hex(p2),
418 418 p1rev = fl.rev(p1), p2rev = fl.rev(p2)))
419 419 parity = 1 - parity
420 420
421 421 yield l
422 422
423 423 yield self.t("filelog",
424 424 header = self.header(),
425 425 footer = self.footer(),
426 426 repo = self.reponame,
427 427 file = f,
428 428 filenode = filenode,
429 429 entries = entries)
430 430
431 431 def filerevision(self, f, node):
432 432 fl = self.repo.file(f)
433 433 n = bin(node)
434 434 text = fl.read(n)
435 435 changerev = fl.linkrev(n)
436 436 cl = self.repo.changelog
437 437 cn = cl.node(changerev)
438 438 cs = cl.read(cn)
439 439 p1, p2 = fl.parents(n)
440 440 t = float(cs[2].split(' ')[0])
441 441 mfn = cs[0]
442 442
443 443 def lines():
444 444 for l, t in enumerate(text.splitlines(1)):
445 445 yield self.t("fileline", line = t,
446 446 linenumber = "% 6d" % (l + 1),
447 447 parity = l & 1)
448 448
449 449 yield self.t("filerevision", file = f,
450 450 header = self.header(),
451 451 footer = self.footer(),
452 452 repo = self.reponame,
453 453 filenode = node,
454 454 path = up(f),
455 455 text = lines(),
456 456 rev = changerev,
457 457 node = hex(cn),
458 458 manifest = hex(mfn),
459 459 author = cs[1],
460 460 date = t,
461 461 parent = self.parents("filerevparent",
462 462 fl.parents(n), fl.rev, file=f),
463 463 p1 = hex(p1), p2 = hex(p2),
464 464 permissions = self.repo.manifest.readflags(mfn)[f],
465 465 p1rev = fl.rev(p1), p2rev = fl.rev(p2))
466 466
467 467 def fileannotate(self, f, node):
468 468 bcache = {}
469 469 ncache = {}
470 470 fl = self.repo.file(f)
471 471 n = bin(node)
472 472 changerev = fl.linkrev(n)
473 473
474 474 cl = self.repo.changelog
475 475 cn = cl.node(changerev)
476 476 cs = cl.read(cn)
477 477 p1, p2 = fl.parents(n)
478 478 t = float(cs[2].split(' ')[0])
479 479 mfn = cs[0]
480 480
481 481 def annotate():
482 482 parity = 1
483 483 last = None
484 484 for r, l in fl.annotate(n):
485 485 try:
486 486 cnode = ncache[r]
487 487 except KeyError:
488 488 cnode = ncache[r] = self.repo.changelog.node(r)
489 489
490 490 try:
491 491 name = bcache[r]
492 492 except KeyError:
493 493 cl = self.repo.changelog.read(cnode)
494 494 name = cl[1]
 495 495                     pos = name.find('@')
 496 496                     if pos >= 0:
 497 497                         name = name[:pos]
 498 498                     pos = name.find('<')
 499 499                     if pos >= 0:
 500 500                         name = name[pos+1:]
501 501 bcache[r] = name
502 502
503 503 if last != cnode:
504 504 parity = 1 - parity
505 505 last = cnode
506 506
507 507 yield self.t("annotateline",
508 508 parity = parity,
509 509 node = hex(cnode),
510 510 rev = r,
511 511 author = name,
512 512 file = f,
513 513 line = l)
514 514
515 515 yield self.t("fileannotate",
516 516 header = self.header(),
517 517 footer = self.footer(),
518 518 repo = self.reponame,
519 519 file = f,
520 520 filenode = node,
521 521 annotate = annotate,
522 522 path = up(f),
523 523 rev = changerev,
524 524 node = hex(cn),
525 525 manifest = hex(mfn),
526 526 author = cs[1],
527 527 date = t,
528 528 parent = self.parents("fileannotateparent",
529 529 fl.parents(n), fl.rev, file=f),
530 530 p1 = hex(p1), p2 = hex(p2),
531 531 permissions = self.repo.manifest.readflags(mfn)[f],
532 532 p1rev = fl.rev(p1), p2rev = fl.rev(p2))
533 533
534 534 def manifest(self, mnode, path):
535 535 mf = self.repo.manifest.read(bin(mnode))
536 536 rev = self.repo.manifest.rev(bin(mnode))
537 537 node = self.repo.changelog.node(rev)
538 538 mff=self.repo.manifest.readflags(bin(mnode))
539 539
540 540 files = {}
541 541
542 542 p = path[1:]
543 543 l = len(p)
544 544
545 545 for f,n in mf.items():
546 546 if f[:l] != p:
547 547 continue
548 548 remain = f[l:]
549 549 if "/" in remain:
550 550 short = remain[:remain.find("/") + 1] # bleah
551 551 files[short] = (f, None)
552 552 else:
553 553 short = os.path.basename(remain)
554 554 files[short] = (f, n)
555 555
556 556 def filelist():
557 557 parity = 0
558 558 fl = files.keys()
559 559 fl.sort()
560 560 for f in fl:
561 561 full, fnode = files[f]
562 562 if fnode:
563 563 yield self.t("manifestfileentry",
564 564 file = full,
565 565 manifest = mnode,
566 566 filenode = hex(fnode),
567 567 parity = parity,
568 568 basename = f,
569 569 permissions = mff[full])
570 570 else:
571 571 yield self.t("manifestdirentry",
572 572 parity = parity,
573 573 path = os.path.join(path, f),
574 574 manifest = mnode, basename = f[:-1])
575 575 parity = 1 - parity
576 576
577 577 yield self.t("manifest",
578 578 header = self.header(),
579 579 footer = self.footer(),
580 580 repo = self.reponame,
581 581 manifest = mnode,
582 582 rev = rev,
583 583 node = hex(node),
584 584 path = path,
585 585 up = up(path),
586 586 entries = filelist)
587 587
588 588 def tags(self):
589 589 cl = self.repo.changelog
590 590 mf = cl.read(cl.tip())[0]
591 591
592 592 i = self.repo.tagslist()
593 593 i.reverse()
594 594
595 595 def entries():
596 596 parity = 0
597 597 for k,n in i:
598 598 yield self.t("tagentry",
599 599 parity = parity,
600 600 tag = k,
601 601 node = hex(n))
602 602 parity = 1 - parity
603 603
604 604 yield self.t("tags",
605 605 header = self.header(),
606 606 footer = self.footer(),
607 607 repo = self.reponame,
608 608 manifest = hex(mf),
609 609 entries = entries)
610 610
611 611 def filediff(self, file, changeset):
612 612 n = bin(changeset)
613 613 cl = self.repo.changelog
614 614 p1 = cl.parents(n)[0]
615 615 cs = cl.read(n)
616 616 mf = self.repo.manifest.read(cs[0])
617 617
618 618 def diff():
619 619 yield self.diff(p1, n, file)
620 620
621 621 yield self.t("filediff",
622 622 header = self.header(),
623 623 footer = self.footer(),
624 624 repo = self.reponame,
625 625 file = file,
626 626 filenode = hex(mf.get(file, nullid)),
627 627 node = changeset,
628 628 rev = self.repo.changelog.rev(n),
629 629 parent = self.parents("filediffparent",
630 630 cl.parents(n), cl.rev),
631 631 p1rev = self.repo.changelog.rev(p1),
632 632 diff = diff)
633 633
634 634 # add tags to things
635 635 # tags -> list of changesets corresponding to tags
636 636 # find tag, changeset, file
637 637
638 638 def run(self):
639 639 self.refresh()
640 640 args = cgi.parse()
641 641
642 642 m = os.path.join(self.templates, "map")
643 643 if args.has_key('style'):
644 644 b = os.path.basename("map-" + args['style'][0])
645 645 p = os.path.join(self.templates, b)
646 646 if os.path.isfile(p): m = p
647 647
648 648 self.t = templater(m, self.filters)
649 649
650 650 if not args.has_key('cmd') or args['cmd'][0] == 'changelog':
651 651 c = self.repo.changelog.count() - 1
652 652 hi = c
653 653 if args.has_key('rev'):
654 654 hi = args['rev'][0]
655 655 try:
656 656 hi = self.repo.changelog.rev(self.repo.lookup(hi))
657 657 except KeyError:
658 658 write(self.search(hi))
659 659 return
660
660
661 661 write(self.changelog(hi))
662 662
663 663 elif args['cmd'][0] == 'changeset':
664 664 write(self.changeset(args['node'][0]))
665 665
666 666 elif args['cmd'][0] == 'manifest':
667 667 write(self.manifest(args['manifest'][0], args['path'][0]))
668 668
669 669 elif args['cmd'][0] == 'tags':
670 670 write(self.tags())
671 671
672 672 elif args['cmd'][0] == 'filediff':
673 673 write(self.filediff(args['file'][0], args['node'][0]))
674 674
675 675 elif args['cmd'][0] == 'file':
676 676 write(self.filerevision(args['file'][0], args['filenode'][0]))
677 677
678 678 elif args['cmd'][0] == 'annotate':
679 679 write(self.fileannotate(args['file'][0], args['filenode'][0]))
680 680
681 681 elif args['cmd'][0] == 'filelog':
682 682 write(self.filelog(args['file'][0], args['filenode'][0]))
683 683
684 684 elif args['cmd'][0] == 'heads':
685 685 httphdr("text/plain")
686 686 h = self.repo.heads()
687 687 sys.stdout.write(" ".join(map(hex, h)) + "\n")
688 688
689 689 elif args['cmd'][0] == 'branches':
690 690 httphdr("text/plain")
691 691 nodes = []
692 692 if args.has_key('nodes'):
693 693 nodes = map(bin, args['nodes'][0].split(" "))
694 694 for b in self.repo.branches(nodes):
695 695 sys.stdout.write(" ".join(map(hex, b)) + "\n")
696 696
697 697 elif args['cmd'][0] == 'between':
698 698 httphdr("text/plain")
699 699 nodes = []
700 700 if args.has_key('pairs'):
701 701 pairs = [ map(bin, p.split("-"))
702 702 for p in args['pairs'][0].split(" ") ]
703 703 for b in self.repo.between(pairs):
704 704 sys.stdout.write(" ".join(map(hex, b)) + "\n")
705 705
706 706 elif args['cmd'][0] == 'changegroup':
707 707 httphdr("application/hg-changegroup")
708 708 nodes = []
709 709 if self.viewonly:
710 710 return
711 711
712 712 if args.has_key('roots'):
713 713 nodes = map(bin, args['roots'][0].split(" "))
714 714
715 715 z = zlib.compressobj()
716 716 for chunk in self.repo.changegroup(nodes):
717 717 sys.stdout.write(z.compress(chunk))
718 718
719 719 sys.stdout.write(z.flush())
720 720
721 721 else:
722 722 write(self.t("error"))
723 723
724 724 def server(path, name, templates, address, port):
725 725
726 726 import BaseHTTPServer
727 727 import sys, os
728 728
729 729 class hgwebhandler(BaseHTTPServer.BaseHTTPRequestHandler):
730 730 def do_POST(self):
731 731 try:
732 732 self.do_hgweb()
733 733 except socket.error, inst:
734 734 if inst.args[0] != 32: raise
735 735
736 736 def do_GET(self):
737 737 self.do_POST()
738 738
739 739 def do_hgweb(self):
740 740 query = ""
741 741 p = self.path.find("?")
 742 742             if p >= 0:
743 743 query = self.path[p + 1:]
744 744 query = query.replace('+', ' ')
745 745
746 746 env = {}
747 747 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
748 748 env['REQUEST_METHOD'] = self.command
749 749 if query:
750 750 env['QUERY_STRING'] = query
751 751 host = self.address_string()
752 752 if host != self.client_address[0]:
753 753 env['REMOTE_HOST'] = host
754 754 env['REMOTE_ADDR'] = self.client_address[0]
755 755
756 756 if self.headers.typeheader is None:
757 757 env['CONTENT_TYPE'] = self.headers.type
758 758 else:
759 759 env['CONTENT_TYPE'] = self.headers.typeheader
760 760 length = self.headers.getheader('content-length')
761 761 if length:
762 762 env['CONTENT_LENGTH'] = length
763 763 accept = []
764 764 for line in self.headers.getallmatchingheaders('accept'):
765 765 if line[:1] in "\t\n\r ":
766 766 accept.append(line.strip())
767 767 else:
768 768 accept = accept + line[7:].split(',')
769 769 env['HTTP_ACCEPT'] = ','.join(accept)
770 770
771 771 os.environ.update(env)
772 772
773 773 save = sys.argv, sys.stdin, sys.stdout, sys.stderr
774 774 try:
775 775 sys.stdin = self.rfile
776 776 sys.stdout = self.wfile
777 777 sys.argv = ["hgweb.py"]
778 778 if '=' not in query:
779 779 sys.argv.append(query)
780 780 self.send_response(200, "Script output follows")
781 781 hg.run()
782 782 finally:
783 783 sys.argv, sys.stdin, sys.stdout, sys.stderr = save
784 784
785 785 hg = hgweb(path, name, templates)
786 786 httpd = BaseHTTPServer.HTTPServer((address, port), hgwebhandler)
787 787 httpd.serve_forever()
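server() above exposes an hgweb instance through BaseHTTPServer by faking a CGI environment for each request and redirecting stdin/stdout to the socket before calling hg.run(). A minimal way to start it is sketched below; the repository path, name, and port are placeholders, and the empty templates argument falls back to the auto-detected template directory.

    if __name__ == "__main__":
        # placeholders: point at a real repository and pick a free port
        server("/home/user/src/hg", "my-repo", "", "localhost", 8000)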
@@ -1,42 +1,42
1 1 #!/usr/bin/env python
2
3 # This is the mercurial setup script.
2 #
3 # This is the mercurial setup script.
4 4 #
5 5 # './setup.py install', or
6 6 # './setup.py --help' for more options
7 7
8 8 import glob
9 9 from distutils.core import setup, Extension
10 10 from distutils.command.install_data import install_data
11 11
12 12 import mercurial.version
13 13
14 14 # specify version string, otherwise 'hg identify' will be used:
15 15 version = ''
16 16
17 17 class install_package_data(install_data):
18 18 def finalize_options(self):
19 19 self.set_undefined_options('install',
20 20 ('install_lib', 'install_dir'))
21 21 install_data.finalize_options(self)
22 22
23 23 try:
24 24 mercurial.version.remember_version(version)
25 25 setup(name='mercurial',
26 26 version=mercurial.version.get_version(),
27 27 author='Matt Mackall',
28 28 author_email='mpm@selenic.com',
29 29 url='http://selenic.com/mercurial',
30 30 description='scalable distributed SCM',
31 31 license='GNU GPL',
32 32 packages=['mercurial'],
33 33 ext_modules=[Extension('mercurial.mpatch', ['mercurial/mpatch.c']),
34 34 Extension('mercurial.bdiff', ['mercurial/bdiff.c'])],
35 35 data_files=[('mercurial/templates',
36 36 ['templates/map'] +
37 37 glob.glob('templates/map-*') +
38 glob.glob('templates/*.tmpl'))],
38 glob.glob('templates/*.tmpl'))],
39 39 cmdclass = { 'install_data' : install_package_data },
40 40 scripts=['hg', 'hgmerge'])
41 41 finally:
42 42 mercurial.version.forget_version()