##// END OF EJS Templates
use Exception(args)-style raising consistently (py3k compatibility)
Peter Ruibal -
r7008:8fee8ff1 default
parent child Browse files
Show More
@@ -1,469 +1,469 b''
1 1 # This library is free software; you can redistribute it and/or
2 2 # modify it under the terms of the GNU Lesser General Public
3 3 # License as published by the Free Software Foundation; either
4 4 # version 2.1 of the License, or (at your option) any later version.
5 5 #
6 6 # This library is distributed in the hope that it will be useful,
7 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 9 # Lesser General Public License for more details.
10 10 #
11 11 # You should have received a copy of the GNU Lesser General Public
12 12 # License along with this library; if not, write to the
13 13 # Free Software Foundation, Inc.,
14 14 # 59 Temple Place, Suite 330,
15 15 # Boston, MA 02111-1307 USA
16 16
17 17 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
18 18 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
19 19
20 20 # $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
21 21
22 22 import os
23 23 import stat
24 24 import urllib
25 25 import urllib2
26 26 import rfc822
27 27
28 28 try:
29 29 from cStringIO import StringIO
30 30 except ImportError, msg:
31 31 from StringIO import StringIO
32 32
33 33 class RangeError(IOError):
34 34 """Error raised when an unsatisfiable range is requested."""
35 35 pass
36 36
37 37 class HTTPRangeHandler(urllib2.BaseHandler):
38 38 """Handler that enables HTTP Range headers.
39 39
 40 40 This was extremely simple. The Range header is an HTTP feature to
41 41 begin with so all this class does is tell urllib2 that the
 42 42 "206 Partial Content" response from the HTTP server is what we
43 43 expected.
44 44
45 45 Example:
46 46 import urllib2
47 47 import byterange
48 48
49 49 range_handler = range.HTTPRangeHandler()
50 50 opener = urllib2.build_opener(range_handler)
51 51
52 52 # install it
53 53 urllib2.install_opener(opener)
54 54
55 55 # create Request and set Range header
56 56 req = urllib2.Request('http://www.python.org/')
57 57 req.header['Range'] = 'bytes=30-50'
58 58 f = urllib2.urlopen(req)
59 59 """
60 60
61 61 def http_error_206(self, req, fp, code, msg, hdrs):
62 62 # 206 Partial Content Response
63 63 r = urllib.addinfourl(fp, hdrs, req.get_full_url())
64 64 r.code = code
65 65 r.msg = msg
66 66 return r
67 67
68 68 def http_error_416(self, req, fp, code, msg, hdrs):
69 69 # HTTP's Range Not Satisfiable error
70 70 raise RangeError('Requested Range Not Satisfiable')
71 71
72 72 class RangeableFileObject:
73 73 """File object wrapper to enable raw range handling.
 74 74 This was implemented primarily for handling range
75 75 specifications for file:// urls. This object effectively makes
76 76 a file object look like it consists only of a range of bytes in
77 77 the stream.
78 78
79 79 Examples:
80 80 # expose 10 bytes, starting at byte position 20, from
81 81 # /etc/aliases.
82 82 >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
83 83 # seek seeks within the range (to position 23 in this case)
84 84 >>> fo.seek(3)
 85 85 # tell tells where you're at _within the range_ (position 3 in
86 86 # this case)
87 87 >>> fo.tell()
88 88 # read EOFs if an attempt is made to read past the last
89 89 # byte in the range. the following will return only 7 bytes.
90 90 >>> fo.read(30)
91 91 """
92 92
93 93 def __init__(self, fo, rangetup):
94 94 """Create a RangeableFileObject.
95 95 fo -- a file like object. only the read() method need be
96 96 supported but supporting an optimized seek() is
97 97 preferable.
98 98 rangetup -- a (firstbyte,lastbyte) tuple specifying the range
99 99 to work over.
100 100 The file object provided is assumed to be at byte offset 0.
101 101 """
102 102 self.fo = fo
103 103 (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
104 104 self.realpos = 0
105 105 self._do_seek(self.firstbyte)
106 106
107 107 def __getattr__(self, name):
108 108 """This effectively allows us to wrap at the instance level.
109 109 Any attribute not found in _this_ object will be searched for
110 110 in self.fo. This includes methods."""
111 111 if hasattr(self.fo, name):
112 112 return getattr(self.fo, name)
113 raise AttributeError, name
113 raise AttributeError(name)
114 114
115 115 def tell(self):
116 116 """Return the position within the range.
117 117 This is different from fo.seek in that position 0 is the
118 118 first byte position of the range tuple. For example, if
119 119 this object was created with a range tuple of (500,899),
120 120 tell() will return 0 when at byte position 500 of the file.
121 121 """
122 122 return (self.realpos - self.firstbyte)
123 123
124 124 def seek(self, offset, whence=0):
125 125 """Seek within the byte range.
126 126 Positioning is identical to that described under tell().
127 127 """
128 128 assert whence in (0, 1, 2)
129 129 if whence == 0: # absolute seek
130 130 realoffset = self.firstbyte + offset
131 131 elif whence == 1: # relative seek
132 132 realoffset = self.realpos + offset
133 133 elif whence == 2: # absolute from end of file
134 134 # XXX: are we raising the right Error here?
135 135 raise IOError('seek from end of file not supported.')
136 136
137 137 # do not allow seek past lastbyte in range
138 138 if self.lastbyte and (realoffset >= self.lastbyte):
139 139 realoffset = self.lastbyte
140 140
141 141 self._do_seek(realoffset - self.realpos)
142 142
143 143 def read(self, size=-1):
144 144 """Read within the range.
145 145 This method will limit the size read based on the range.
146 146 """
147 147 size = self._calc_read_size(size)
148 148 rslt = self.fo.read(size)
149 149 self.realpos += len(rslt)
150 150 return rslt
151 151
152 152 def readline(self, size=-1):
153 153 """Read lines within the range.
154 154 This method will limit the size read based on the range.
155 155 """
156 156 size = self._calc_read_size(size)
157 157 rslt = self.fo.readline(size)
158 158 self.realpos += len(rslt)
159 159 return rslt
160 160
161 161 def _calc_read_size(self, size):
162 162 """Handles calculating the amount of data to read based on
163 163 the range.
164 164 """
165 165 if self.lastbyte:
166 166 if size > -1:
167 167 if ((self.realpos + size) >= self.lastbyte):
168 168 size = (self.lastbyte - self.realpos)
169 169 else:
170 170 size = (self.lastbyte - self.realpos)
171 171 return size
172 172
173 173 def _do_seek(self, offset):
174 174 """Seek based on whether wrapped object supports seek().
175 175 offset is relative to the current position (self.realpos).
176 176 """
177 177 assert offset >= 0
178 178 if not hasattr(self.fo, 'seek'):
179 179 self._poor_mans_seek(offset)
180 180 else:
181 181 self.fo.seek(self.realpos + offset)
182 182 self.realpos += offset
183 183
184 184 def _poor_mans_seek(self, offset):
185 185 """Seek by calling the wrapped file objects read() method.
186 186 This is used for file like objects that do not have native
187 187 seek support. The wrapped objects read() method is called
188 188 to manually seek to the desired position.
189 189 offset -- read this number of bytes from the wrapped
190 190 file object.
191 191 raise RangeError if we encounter EOF before reaching the
192 192 specified offset.
193 193 """
194 194 pos = 0
195 195 bufsize = 1024
196 196 while pos < offset:
197 197 if (pos + bufsize) > offset:
198 198 bufsize = offset - pos
199 199 buf = self.fo.read(bufsize)
200 200 if len(buf) != bufsize:
201 201 raise RangeError('Requested Range Not Satisfiable')
202 202 pos += bufsize
203 203
204 204 class FileRangeHandler(urllib2.FileHandler):
205 205 """FileHandler subclass that adds Range support.
206 206 This class handles Range headers exactly like an HTTP
207 207 server would.
208 208 """
209 209 def open_local_file(self, req):
210 210 import mimetypes
211 211 import mimetools
212 212 host = req.get_host()
213 213 file = req.get_selector()
214 214 localfile = urllib.url2pathname(file)
215 215 stats = os.stat(localfile)
216 216 size = stats[stat.ST_SIZE]
217 217 modified = rfc822.formatdate(stats[stat.ST_MTIME])
218 218 mtype = mimetypes.guess_type(file)[0]
219 219 if host:
220 220 host, port = urllib.splitport(host)
221 221 if port or socket.gethostbyname(host) not in self.get_names():
222 222 raise urllib2.URLError('file not on local host')
223 223 fo = open(localfile,'rb')
224 224 brange = req.headers.get('Range', None)
225 225 brange = range_header_to_tuple(brange)
226 226 assert brange != ()
227 227 if brange:
228 228 (fb, lb) = brange
229 229 if lb == '':
230 230 lb = size
231 231 if fb < 0 or fb > size or lb > size:
232 232 raise RangeError('Requested Range Not Satisfiable')
233 233 size = (lb - fb)
234 234 fo = RangeableFileObject(fo, (fb, lb))
235 235 headers = mimetools.Message(StringIO(
236 236 'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' %
237 237 (mtype or 'text/plain', size, modified)))
238 238 return urllib.addinfourl(fo, headers, 'file:'+file)
239 239
240 240
241 241 # FTP Range Support
242 242 # Unfortunately, a large amount of base FTP code had to be copied
243 243 # from urllib and urllib2 in order to insert the FTP REST command.
244 244 # Code modifications for range support have been commented as
245 245 # follows:
246 246 # -- range support modifications start/end here
247 247
248 248 from urllib import splitport, splituser, splitpasswd, splitattr, \
249 249 unquote, addclosehook, addinfourl
250 250 import ftplib
251 251 import socket
252 252 import sys
253 253 import mimetypes
254 254 import mimetools
255 255
256 256 class FTPRangeHandler(urllib2.FTPHandler):
257 257 def ftp_open(self, req):
258 258 host = req.get_host()
259 259 if not host:
260 raise IOError, ('ftp error', 'no host given')
260 raise IOError('ftp error', 'no host given')
261 261 host, port = splitport(host)
262 262 if port is None:
263 263 port = ftplib.FTP_PORT
264 264
265 265 # username/password handling
266 266 user, host = splituser(host)
267 267 if user:
268 268 user, passwd = splitpasswd(user)
269 269 else:
270 270 passwd = None
271 271 host = unquote(host)
272 272 user = unquote(user or '')
273 273 passwd = unquote(passwd or '')
274 274
275 275 try:
276 276 host = socket.gethostbyname(host)
277 277 except socket.error, msg:
278 278 raise urllib2.URLError(msg)
279 279 path, attrs = splitattr(req.get_selector())
280 280 dirs = path.split('/')
281 281 dirs = map(unquote, dirs)
282 282 dirs, file = dirs[:-1], dirs[-1]
283 283 if dirs and not dirs[0]:
284 284 dirs = dirs[1:]
285 285 try:
286 286 fw = self.connect_ftp(user, passwd, host, port, dirs)
287 287 type = file and 'I' or 'D'
288 288 for attr in attrs:
289 289 attr, value = splitattr(attr)
290 290 if attr.lower() == 'type' and \
291 291 value in ('a', 'A', 'i', 'I', 'd', 'D'):
292 292 type = value.upper()
293 293
294 294 # -- range support modifications start here
295 295 rest = None
296 296 range_tup = range_header_to_tuple(req.headers.get('Range', None))
297 297 assert range_tup != ()
298 298 if range_tup:
299 299 (fb, lb) = range_tup
300 300 if fb > 0:
301 301 rest = fb
302 302 # -- range support modifications end here
303 303
304 304 fp, retrlen = fw.retrfile(file, type, rest)
305 305
306 306 # -- range support modifications start here
307 307 if range_tup:
308 308 (fb, lb) = range_tup
309 309 if lb == '':
310 310 if retrlen is None or retrlen == 0:
311 311 raise RangeError('Requested Range Not Satisfiable due to unobtainable file length.')
312 312 lb = retrlen
313 313 retrlen = lb - fb
314 314 if retrlen < 0:
315 315 # beginning of range is larger than file
316 316 raise RangeError('Requested Range Not Satisfiable')
317 317 else:
318 318 retrlen = lb - fb
319 319 fp = RangeableFileObject(fp, (0, retrlen))
320 320 # -- range support modifications end here
321 321
322 322 headers = ""
323 323 mtype = mimetypes.guess_type(req.get_full_url())[0]
324 324 if mtype:
325 325 headers += "Content-Type: %s\n" % mtype
326 326 if retrlen is not None and retrlen >= 0:
327 327 headers += "Content-Length: %d\n" % retrlen
328 328 sf = StringIO(headers)
329 329 headers = mimetools.Message(sf)
330 330 return addinfourl(fp, headers, req.get_full_url())
331 331 except ftplib.all_errors, msg:
332 raise IOError, ('ftp error', msg), sys.exc_info()[2]
332 raise IOError('ftp error', msg), sys.exc_info()[2]
333 333
334 334 def connect_ftp(self, user, passwd, host, port, dirs):
335 335 fw = ftpwrapper(user, passwd, host, port, dirs)
336 336 return fw
337 337
338 338 class ftpwrapper(urllib.ftpwrapper):
339 339 # range support note:
340 340 # this ftpwrapper code is copied directly from
341 341 # urllib. The only enhancement is to add the rest
342 342 # argument and pass it on to ftp.ntransfercmd
343 343 def retrfile(self, file, type, rest=None):
344 344 self.endtransfer()
345 345 if type in ('d', 'D'):
346 346 cmd = 'TYPE A'
347 347 isdir = 1
348 348 else:
349 349 cmd = 'TYPE ' + type
350 350 isdir = 0
351 351 try:
352 352 self.ftp.voidcmd(cmd)
353 353 except ftplib.all_errors:
354 354 self.init()
355 355 self.ftp.voidcmd(cmd)
356 356 conn = None
357 357 if file and not isdir:
358 358 # Use nlst to see if the file exists at all
359 359 try:
360 360 self.ftp.nlst(file)
361 361 except ftplib.error_perm, reason:
362 raise IOError, ('ftp error', reason), sys.exc_info()[2]
362 raise IOError('ftp error', reason), sys.exc_info()[2]
363 363 # Restore the transfer mode!
364 364 self.ftp.voidcmd(cmd)
365 365 # Try to retrieve as a file
366 366 try:
367 367 cmd = 'RETR ' + file
368 368 conn = self.ftp.ntransfercmd(cmd, rest)
369 369 except ftplib.error_perm, reason:
370 370 if str(reason).startswith('501'):
371 371 # workaround for REST not supported error
372 372 fp, retrlen = self.retrfile(file, type)
373 373 fp = RangeableFileObject(fp, (rest,''))
374 374 return (fp, retrlen)
375 375 elif not str(reason).startswith('550'):
376 raise IOError, ('ftp error', reason), sys.exc_info()[2]
376 raise IOError('ftp error', reason), sys.exc_info()[2]
377 377 if not conn:
378 378 # Set transfer mode to ASCII!
379 379 self.ftp.voidcmd('TYPE A')
380 380 # Try a directory listing
381 381 if file:
382 382 cmd = 'LIST ' + file
383 383 else:
384 384 cmd = 'LIST'
385 385 conn = self.ftp.ntransfercmd(cmd)
386 386 self.busy = 1
387 387 # Pass back both a suitably decorated object and a retrieval length
388 388 return (addclosehook(conn[0].makefile('rb'),
389 389 self.endtransfer), conn[1])
390 390
391 391
392 392 ####################################################################
393 393 # Range Tuple Functions
394 394 # XXX: These range tuple functions might go better in a class.
395 395
396 396 _rangere = None
397 397 def range_header_to_tuple(range_header):
398 398 """Get a (firstbyte,lastbyte) tuple from a Range header value.
399 399
400 400 Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
401 401 function pulls the firstbyte and lastbyte values and returns
402 402 a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
403 403 the header value, it is returned as an empty string in the
404 404 tuple.
405 405
406 406 Return None if range_header is None
407 407 Return () if range_header does not conform to the range spec
408 408 pattern.
409 409
410 410 """
411 411 global _rangere
412 412 if range_header is None:
413 413 return None
414 414 if _rangere is None:
415 415 import re
416 416 _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
417 417 match = _rangere.match(range_header)
418 418 if match:
419 419 tup = range_tuple_normalize(match.group(1, 2))
420 420 if tup and tup[1]:
421 421 tup = (tup[0], tup[1]+1)
422 422 return tup
423 423 return ()
424 424
425 425 def range_tuple_to_header(range_tup):
426 426 """Convert a range tuple to a Range header value.
427 427 Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
428 428 if no range is needed.
429 429 """
430 430 if range_tup is None:
431 431 return None
432 432 range_tup = range_tuple_normalize(range_tup)
433 433 if range_tup:
434 434 if range_tup[1]:
435 435 range_tup = (range_tup[0], range_tup[1] - 1)
436 436 return 'bytes=%s-%s' % range_tup
437 437
438 438 def range_tuple_normalize(range_tup):
439 439 """Normalize a (first_byte,last_byte) range tuple.
440 440 Return a tuple whose first element is guaranteed to be an int
441 441 and whose second element will be '' (meaning: the last byte) or
442 442 an int. Finally, return None if the normalized tuple == (0,'')
 443 443 as that is equivalent to retrieving the entire file.
444 444 """
445 445 if range_tup is None:
446 446 return None
447 447 # handle first byte
448 448 fb = range_tup[0]
449 449 if fb in (None, ''):
450 450 fb = 0
451 451 else:
452 452 fb = int(fb)
453 453 # handle last byte
454 454 try:
455 455 lb = range_tup[1]
456 456 except IndexError:
457 457 lb = ''
458 458 else:
459 459 if lb is None:
460 460 lb = ''
461 461 elif lb != '':
462 462 lb = int(lb)
463 463 # check if range is over the entire file
464 464 if (fb, lb) == (0, ''):
465 465 return None
466 466 # check that the range is valid
467 467 if lb < fb:
468 468 raise RangeError('Invalid byte range: %s-%s' % (fb, lb))
469 469 return (fb, lb)
@@ -1,775 +1,775 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import nullid, nullrev, short, hex
9 9 from i18n import _
10 10 import ancestor, bdiff, revlog, util, os, errno
11 11
12 12 class changectx(object):
13 13 """A changecontext object makes access to data related to a particular
14 14 changeset convenient."""
15 15 def __init__(self, repo, changeid=''):
16 16 """changeid is a revision number, node, or tag"""
17 17 if changeid == '':
18 18 changeid = '.'
19 19 self._repo = repo
20 20 self._node = self._repo.lookup(changeid)
21 21 self._rev = self._repo.changelog.rev(self._node)
22 22
23 23 def __str__(self):
24 24 return short(self.node())
25 25
26 26 def __int__(self):
27 27 return self.rev()
28 28
29 29 def __repr__(self):
30 30 return "<changectx %s>" % str(self)
31 31
32 32 def __hash__(self):
33 33 try:
34 34 return hash(self._rev)
35 35 except AttributeError:
36 36 return id(self)
37 37
38 38 def __eq__(self, other):
39 39 try:
40 40 return self._rev == other._rev
41 41 except AttributeError:
42 42 return False
43 43
44 44 def __ne__(self, other):
45 45 return not (self == other)
46 46
47 47 def __nonzero__(self):
48 48 return self._rev != nullrev
49 49
50 50 def __getattr__(self, name):
51 51 if name == '_changeset':
52 52 self._changeset = self._repo.changelog.read(self.node())
53 53 return self._changeset
54 54 elif name == '_manifest':
55 55 self._manifest = self._repo.manifest.read(self._changeset[0])
56 56 return self._manifest
57 57 elif name == '_manifestdelta':
58 58 md = self._repo.manifest.readdelta(self._changeset[0])
59 59 self._manifestdelta = md
60 60 return self._manifestdelta
61 61 elif name == '_parents':
62 62 p = self._repo.changelog.parents(self._node)
63 63 if p[1] == nullid:
64 64 p = p[:-1]
65 65 self._parents = [changectx(self._repo, x) for x in p]
66 66 return self._parents
67 67 else:
68 raise AttributeError, name
68 raise AttributeError(name)
69 69
70 70 def __contains__(self, key):
71 71 return key in self._manifest
72 72
73 73 def __getitem__(self, key):
74 74 return self.filectx(key)
75 75
76 76 def __iter__(self):
77 77 for f in util.sort(self._manifest):
78 78 yield f
79 79
80 80 def changeset(self): return self._changeset
81 81 def manifest(self): return self._manifest
82 82
83 83 def rev(self): return self._rev
84 84 def node(self): return self._node
85 85 def hex(self): return hex(self._node)
86 86 def user(self): return self._changeset[1]
87 87 def date(self): return self._changeset[2]
88 88 def files(self): return self._changeset[3]
89 89 def description(self): return self._changeset[4]
90 90 def branch(self): return self._changeset[5].get("branch")
91 91 def extra(self): return self._changeset[5]
92 92 def tags(self): return self._repo.nodetags(self._node)
93 93
94 94 def parents(self):
95 95 """return contexts for each parent changeset"""
96 96 return self._parents
97 97
98 98 def children(self):
99 99 """return contexts for each child changeset"""
100 100 c = self._repo.changelog.children(self._node)
101 101 return [changectx(self._repo, x) for x in c]
102 102
103 103 def ancestors(self):
104 104 for a in self._repo.changelog.ancestors(self._rev):
105 105 yield changectx(self._repo, a)
106 106
107 107 def descendants(self):
108 108 for d in self._repo.changelog.descendants(self._rev):
109 109 yield changectx(self._repo, d)
110 110
111 111 def _fileinfo(self, path):
112 112 if '_manifest' in self.__dict__:
113 113 try:
114 114 return self._manifest[path], self._manifest.flags(path)
115 115 except KeyError:
116 116 raise revlog.LookupError(self._node, path,
117 117 _('not found in manifest'))
118 118 if '_manifestdelta' in self.__dict__ or path in self.files():
119 119 if path in self._manifestdelta:
120 120 return self._manifestdelta[path], self._manifestdelta.flags(path)
121 121 node, flag = self._repo.manifest.find(self._changeset[0], path)
122 122 if not node:
123 123 raise revlog.LookupError(self._node, path,
124 124 _('not found in manifest'))
125 125
126 126 return node, flag
127 127
128 128 def filenode(self, path):
129 129 return self._fileinfo(path)[0]
130 130
131 131 def flags(self, path):
132 132 try:
133 133 return self._fileinfo(path)[1]
134 134 except revlog.LookupError:
135 135 return ''
136 136
137 137 def filectx(self, path, fileid=None, filelog=None):
138 138 """get a file context from this changeset"""
139 139 if fileid is None:
140 140 fileid = self.filenode(path)
141 141 return filectx(self._repo, path, fileid=fileid,
142 142 changectx=self, filelog=filelog)
143 143
144 144 def ancestor(self, c2):
145 145 """
146 146 return the ancestor context of self and c2
147 147 """
148 148 n = self._repo.changelog.ancestor(self._node, c2._node)
149 149 return changectx(self._repo, n)
150 150
151 151 def walk(self, match):
152 152 fdict = dict.fromkeys(match.files())
153 153 # for dirstate.walk, files=['.'] means "walk the whole tree".
154 154 # follow that here, too
155 155 fdict.pop('.', None)
156 156 for fn in self:
157 157 for ffn in fdict:
158 158 # match if the file is the exact name or a directory
159 159 if ffn == fn or fn.startswith("%s/" % ffn):
160 160 del fdict[ffn]
161 161 break
162 162 if match(fn):
163 163 yield fn
164 164 for fn in util.sort(fdict):
165 165 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
166 166 yield fn
167 167
168 168 class filectx(object):
169 169 """A filecontext object makes access to data related to a particular
170 170 filerevision convenient."""
171 171 def __init__(self, repo, path, changeid=None, fileid=None,
172 172 filelog=None, changectx=None):
173 173 """changeid can be a changeset revision, node, or tag.
174 174 fileid can be a file revision or node."""
175 175 self._repo = repo
176 176 self._path = path
177 177
178 178 assert (changeid is not None
179 179 or fileid is not None
180 180 or changectx is not None)
181 181
182 182 if filelog:
183 183 self._filelog = filelog
184 184
185 185 if changeid is not None:
186 186 self._changeid = changeid
187 187 if changectx is not None:
188 188 self._changectx = changectx
189 189 if fileid is not None:
190 190 self._fileid = fileid
191 191
192 192 def __getattr__(self, name):
193 193 if name == '_changectx':
194 194 self._changectx = changectx(self._repo, self._changeid)
195 195 return self._changectx
196 196 elif name == '_filelog':
197 197 self._filelog = self._repo.file(self._path)
198 198 return self._filelog
199 199 elif name == '_changeid':
200 200 if '_changectx' in self.__dict__:
201 201 self._changeid = self._changectx.rev()
202 202 else:
203 203 self._changeid = self._filelog.linkrev(self._filenode)
204 204 return self._changeid
205 205 elif name == '_filenode':
206 206 if '_fileid' in self.__dict__:
207 207 self._filenode = self._filelog.lookup(self._fileid)
208 208 else:
209 209 self._filenode = self._changectx.filenode(self._path)
210 210 return self._filenode
211 211 elif name == '_filerev':
212 212 self._filerev = self._filelog.rev(self._filenode)
213 213 return self._filerev
214 214 elif name == '_repopath':
215 215 self._repopath = self._path
216 216 return self._repopath
217 217 else:
218 raise AttributeError, name
218 raise AttributeError(name)
219 219
220 220 def __nonzero__(self):
221 221 try:
222 222 n = self._filenode
223 223 return True
224 224 except revlog.LookupError:
225 225 # file is missing
226 226 return False
227 227
228 228 def __str__(self):
229 229 return "%s@%s" % (self.path(), short(self.node()))
230 230
231 231 def __repr__(self):
232 232 return "<filectx %s>" % str(self)
233 233
234 234 def __hash__(self):
235 235 try:
236 236 return hash((self._path, self._fileid))
237 237 except AttributeError:
238 238 return id(self)
239 239
240 240 def __eq__(self, other):
241 241 try:
242 242 return (self._path == other._path
243 243 and self._fileid == other._fileid)
244 244 except AttributeError:
245 245 return False
246 246
247 247 def __ne__(self, other):
248 248 return not (self == other)
249 249
250 250 def filectx(self, fileid):
251 251 '''opens an arbitrary revision of the file without
252 252 opening a new filelog'''
253 253 return filectx(self._repo, self._path, fileid=fileid,
254 254 filelog=self._filelog)
255 255
256 256 def filerev(self): return self._filerev
257 257 def filenode(self): return self._filenode
258 258 def flags(self): return self._changectx.flags(self._path)
259 259 def filelog(self): return self._filelog
260 260
261 261 def rev(self):
262 262 if '_changectx' in self.__dict__:
263 263 return self._changectx.rev()
264 264 if '_changeid' in self.__dict__:
265 265 return self._changectx.rev()
266 266 return self._filelog.linkrev(self._filenode)
267 267
268 268 def linkrev(self): return self._filelog.linkrev(self._filenode)
269 269 def node(self): return self._changectx.node()
270 270 def user(self): return self._changectx.user()
271 271 def date(self): return self._changectx.date()
272 272 def files(self): return self._changectx.files()
273 273 def description(self): return self._changectx.description()
274 274 def branch(self): return self._changectx.branch()
275 275 def manifest(self): return self._changectx.manifest()
276 276 def changectx(self): return self._changectx
277 277
278 278 def data(self): return self._filelog.read(self._filenode)
279 279 def path(self): return self._path
280 280 def size(self): return self._filelog.size(self._filerev)
281 281
282 282 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
283 283
284 284 def renamed(self):
285 285 """check if file was actually renamed in this changeset revision
286 286
287 287 If rename logged in file revision, we report copy for changeset only
288 288 if file revisions linkrev points back to the changeset in question
289 289 or both changeset parents contain different file revisions.
290 290 """
291 291
292 292 renamed = self._filelog.renamed(self._filenode)
293 293 if not renamed:
294 294 return renamed
295 295
296 296 if self.rev() == self.linkrev():
297 297 return renamed
298 298
299 299 name = self.path()
300 300 fnode = self._filenode
301 301 for p in self._changectx.parents():
302 302 try:
303 303 if fnode == p.filenode(name):
304 304 return None
305 305 except revlog.LookupError:
306 306 pass
307 307 return renamed
308 308
309 309 def parents(self):
310 310 p = self._path
311 311 fl = self._filelog
312 312 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
313 313
314 314 r = self._filelog.renamed(self._filenode)
315 315 if r:
316 316 pl[0] = (r[0], r[1], None)
317 317
318 318 return [filectx(self._repo, p, fileid=n, filelog=l)
319 319 for p,n,l in pl if n != nullid]
320 320
321 321 def children(self):
322 322 # hard for renames
323 323 c = self._filelog.children(self._filenode)
324 324 return [filectx(self._repo, self._path, fileid=x,
325 325 filelog=self._filelog) for x in c]
326 326
327 327 def annotate(self, follow=False, linenumber=None):
328 328 '''returns a list of tuples of (ctx, line) for each line
329 329 in the file, where ctx is the filectx of the node where
330 330 that line was last changed.
331 331 This returns tuples of ((ctx, linenumber), line) for each line,
332 332 if "linenumber" parameter is NOT "None".
333 333 In such tuples, linenumber means one at the first appearance
334 334 in the managed file.
335 335 To reduce annotation cost,
336 336 this returns fixed value(False is used) as linenumber,
337 337 if "linenumber" parameter is "False".'''
338 338
339 339 def decorate_compat(text, rev):
340 340 return ([rev] * len(text.splitlines()), text)
341 341
342 342 def without_linenumber(text, rev):
343 343 return ([(rev, False)] * len(text.splitlines()), text)
344 344
345 345 def with_linenumber(text, rev):
346 346 size = len(text.splitlines())
347 347 return ([(rev, i) for i in xrange(1, size + 1)], text)
348 348
349 349 decorate = (((linenumber is None) and decorate_compat) or
350 350 (linenumber and with_linenumber) or
351 351 without_linenumber)
352 352
353 353 def pair(parent, child):
354 354 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
355 355 child[0][b1:b2] = parent[0][a1:a2]
356 356 return child
357 357
358 358 getlog = util.cachefunc(lambda x: self._repo.file(x))
359 359 def getctx(path, fileid):
360 360 log = path == self._path and self._filelog or getlog(path)
361 361 return filectx(self._repo, path, fileid=fileid, filelog=log)
362 362 getctx = util.cachefunc(getctx)
363 363
364 364 def parents(f):
365 365 # we want to reuse filectx objects as much as possible
366 366 p = f._path
367 367 if f._filerev is None: # working dir
368 368 pl = [(n.path(), n.filerev()) for n in f.parents()]
369 369 else:
370 370 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
371 371
372 372 if follow:
373 373 r = f.renamed()
374 374 if r:
375 375 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
376 376
377 377 return [getctx(p, n) for p, n in pl if n != nullrev]
378 378
379 379 # use linkrev to find the first changeset where self appeared
380 380 if self.rev() != self.linkrev():
381 381 base = self.filectx(self.filerev())
382 382 else:
383 383 base = self
384 384
385 385 # find all ancestors
386 386 needed = {base: 1}
387 387 visit = [base]
388 388 files = [base._path]
389 389 while visit:
390 390 f = visit.pop(0)
391 391 for p in parents(f):
392 392 if p not in needed:
393 393 needed[p] = 1
394 394 visit.append(p)
395 395 if p._path not in files:
396 396 files.append(p._path)
397 397 else:
398 398 # count how many times we'll use this
399 399 needed[p] += 1
400 400
401 401 # sort by revision (per file) which is a topological order
402 402 visit = []
403 403 for f in files:
404 404 fn = [(n.rev(), n) for n in needed if n._path == f]
405 405 visit.extend(fn)
406 406
407 407 hist = {}
408 408 for r, f in util.sort(visit):
409 409 curr = decorate(f.data(), f)
410 410 for p in parents(f):
411 411 if p != nullid:
412 412 curr = pair(hist[p], curr)
413 413 # trim the history of unneeded revs
414 414 needed[p] -= 1
415 415 if not needed[p]:
416 416 del hist[p]
417 417 hist[f] = curr
418 418
419 419 return zip(hist[f][0], hist[f][1].splitlines(1))
420 420
421 421 def ancestor(self, fc2):
422 422 """
423 423 find the common ancestor file context, if any, of self, and fc2
424 424 """
425 425
426 426 acache = {}
427 427
428 428 # prime the ancestor cache for the working directory
429 429 for c in (self, fc2):
430 430 if c._filerev == None:
431 431 pl = [(n.path(), n.filenode()) for n in c.parents()]
432 432 acache[(c._path, None)] = pl
433 433
434 434 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
435 435 def parents(vertex):
436 436 if vertex in acache:
437 437 return acache[vertex]
438 438 f, n = vertex
439 439 if f not in flcache:
440 440 flcache[f] = self._repo.file(f)
441 441 fl = flcache[f]
442 442 pl = [(f, p) for p in fl.parents(n) if p != nullid]
443 443 re = fl.renamed(n)
444 444 if re:
445 445 pl.append(re)
446 446 acache[vertex] = pl
447 447 return pl
448 448
449 449 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
450 450 v = ancestor.ancestor(a, b, parents)
451 451 if v:
452 452 f, n = v
453 453 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
454 454
455 455 return None
456 456
457 457 class workingctx(changectx):
458 458 """A workingctx object makes access to data related to
459 459 the current working directory convenient.
460 460 parents - a pair of parent nodeids, or None to use the dirstate.
461 461 date - any valid date string or (unixtime, offset), or None.
462 462 user - username string, or None.
463 463 extra - a dictionary of extra values, or None.
464 464 changes - a list of file lists as returned by localrepo.status()
465 465 or None to use the repository status.
466 466 """
467 467 def __init__(self, repo, parents=None, text="", user=None, date=None,
468 468 extra=None, changes=None):
469 469 self._repo = repo
470 470 self._rev = None
471 471 self._node = None
472 472 self._text = text
473 473 if date:
474 474 self._date = util.parsedate(date)
475 475 if user:
476 476 self._user = user
477 477 if parents:
478 478 self._parents = [changectx(self._repo, p) for p in parents]
479 479 if changes:
480 480 self._status = list(changes)
481 481
482 482 self._extra = {}
483 483 if extra:
484 484 self._extra = extra.copy()
485 485 if 'branch' not in self._extra:
486 486 branch = self._repo.dirstate.branch()
487 487 try:
488 488 branch = branch.decode('UTF-8').encode('UTF-8')
489 489 except UnicodeDecodeError:
490 490 raise util.Abort(_('branch name not in UTF-8!'))
491 491 self._extra['branch'] = branch
492 492 if self._extra['branch'] == '':
493 493 self._extra['branch'] = 'default'
494 494
495 495 def __str__(self):
496 496 return str(self._parents[0]) + "+"
497 497
498 498 def __nonzero__(self):
499 499 return True
500 500
501 501 def __contains__(self, key):
502 502 return self._dirstate[key] not in "?r"
503 503
504 504 def __getattr__(self, name):
505 505 if name == '_status':
506 506 self._status = self._repo.status(unknown=True)
507 507 return self._status
508 508 elif name == '_user':
509 509 self._user = self._repo.ui.username()
510 510 return self._user
511 511 elif name == '_date':
512 512 self._date = util.makedate()
513 513 return self._date
514 514 if name == '_manifest':
515 515 self._buildmanifest()
516 516 return self._manifest
517 517 elif name == '_parents':
518 518 p = self._repo.dirstate.parents()
519 519 if p[1] == nullid:
520 520 p = p[:-1]
521 521 self._parents = [changectx(self._repo, x) for x in p]
522 522 return self._parents
523 523 else:
524 raise AttributeError, name
524 raise AttributeError(name)
525 525
526 526 def _buildmanifest(self):
527 527 """generate a manifest corresponding to the working directory"""
528 528
529 529 man = self._parents[0].manifest().copy()
530 530 copied = self._repo.dirstate.copies()
531 531 cf = lambda x: man.flags(copied.get(x, x))
532 532 ff = self._repo.dirstate.flagfunc(cf)
533 533 modified, added, removed, deleted, unknown = self._status[:5]
534 534 for i, l in (("a", added), ("m", modified), ("u", unknown)):
535 535 for f in l:
536 536 man[f] = man.get(copied.get(f, f), nullid) + i
537 537 try:
538 538 man.set(f, ff(f))
539 539 except OSError:
540 540 pass
541 541
542 542 for f in deleted + removed:
543 543 if f in man:
544 544 del man[f]
545 545
546 546 self._manifest = man
547 547
548 548 def manifest(self): return self._manifest
549 549
550 550 def user(self): return self._user or self._repo.ui.username()
551 551 def date(self): return self._date
552 552 def description(self): return self._text
553 553 def files(self):
554 554 return util.sort(self._status[0] + self._status[1] + self._status[2])
555 555
556 556 def modified(self): return self._status[0]
557 557 def added(self): return self._status[1]
558 558 def removed(self): return self._status[2]
559 559 def deleted(self): return self._status[3]
560 560 def unknown(self): return self._status[4]
561 561 def clean(self): return self._status[5]
562 562 def branch(self): return self._extra['branch']
563 563 def extra(self): return self._extra
564 564
565 565 def tags(self):
566 566 t = []
567 567 [t.extend(p.tags()) for p in self.parents()]
568 568 return t
569 569
570 570 def children(self):
571 571 return []
572 572
573 573 def flags(self, path):
574 574 if '_manifest' in self.__dict__:
575 575 try:
576 576 return self._manifest.flags(path)
577 577 except KeyError:
578 578 return ''
579 579
580 580 pnode = self._parents[0].changeset()[0]
581 581 orig = self._repo.dirstate.copies().get(path, path)
582 582 node, flag = self._repo.manifest.find(pnode, orig)
583 583 try:
584 584 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
585 585 return ff(path)
586 586 except OSError:
587 587 pass
588 588
589 589 if not node or path in self.deleted() or path in self.removed():
590 590 return ''
591 591 return flag
592 592
593 593 def filectx(self, path, filelog=None):
594 594 """get a file context from the working directory"""
595 595 return workingfilectx(self._repo, path, workingctx=self,
596 596 filelog=filelog)
597 597
598 598 def ancestor(self, c2):
599 599 """return the ancestor context of self and c2"""
600 600 return self._parents[0].ancestor(c2) # punt on two parents for now
601 601
602 602 def walk(self, match):
603 603 return util.sort(self._repo.dirstate.walk(match, True, False).keys())
604 604
605 605 class workingfilectx(filectx):
606 606 """A workingfilectx object makes access to data related to a particular
607 607 file in the working directory convenient."""
608 608 def __init__(self, repo, path, filelog=None, workingctx=None):
609 609 """changeid can be a changeset revision, node, or tag.
610 610 fileid can be a file revision or node."""
611 611 self._repo = repo
612 612 self._path = path
613 613 self._changeid = None
614 614 self._filerev = self._filenode = None
615 615
616 616 if filelog:
617 617 self._filelog = filelog
618 618 if workingctx:
619 619 self._changectx = workingctx
620 620
621 621 def __getattr__(self, name):
622 622 if name == '_changectx':
623 623 self._changectx = workingctx(self._repo)
624 624 return self._changectx
625 625 elif name == '_repopath':
626 626 self._repopath = (self._repo.dirstate.copied(self._path)
627 627 or self._path)
628 628 return self._repopath
629 629 elif name == '_filelog':
630 630 self._filelog = self._repo.file(self._repopath)
631 631 return self._filelog
632 632 else:
633 raise AttributeError, name
633 raise AttributeError(name)
634 634
635 635 def __nonzero__(self):
636 636 return True
637 637
638 638 def __str__(self):
639 639 return "%s@%s" % (self.path(), self._changectx)
640 640
641 641 def filectx(self, fileid):
642 642 '''opens an arbitrary revision of the file without
643 643 opening a new filelog'''
644 644 return filectx(self._repo, self._repopath, fileid=fileid,
645 645 filelog=self._filelog)
646 646
647 647 def rev(self):
648 648 if '_changectx' in self.__dict__:
649 649 return self._changectx.rev()
650 650 return self._filelog.linkrev(self._filenode)
651 651
652 652 def data(self): return self._repo.wread(self._path)
653 653 def renamed(self):
654 654 rp = self._repopath
655 655 if rp == self._path:
656 656 return None
657 657 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
658 658
659 659 def parents(self):
660 660 '''return parent filectxs, following copies if necessary'''
661 661 p = self._path
662 662 rp = self._repopath
663 663 pcl = self._changectx._parents
664 664 fl = self._filelog
665 665 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
666 666 if len(pcl) > 1:
667 667 if rp != p:
668 668 fl = None
669 669 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
670 670
671 671 return [filectx(self._repo, p, fileid=n, filelog=l)
672 672 for p,n,l in pl if n != nullid]
673 673
674 674 def children(self):
675 675 return []
676 676
677 677 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
678 678 def date(self):
679 679 t, tz = self._changectx.date()
680 680 try:
681 681 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
682 682 except OSError, err:
683 683 if err.errno != errno.ENOENT: raise
684 684 return (t, tz)
685 685
686 686 def cmp(self, text): return self._repo.wread(self._path) == text
687 687
688 688 class memctx(object):
689 689 """A memctx is a subset of changectx supposed to be built on memory
690 690 and passed to commit functions.
691 691
692 692 NOTE: this interface and the related memfilectx are experimental and
693 693 may change without notice.
694 694
695 695 parents - a pair of parent nodeids.
696 696 filectxfn - a callable taking (repo, memctx, path) arguments and
697 697 returning a memctx object.
698 698 date - any valid date string or (unixtime, offset), or None.
699 699 user - username string, or None.
700 700 extra - a dictionary of extra values, or None.
701 701 """
702 702 def __init__(self, repo, parents, text, files, filectxfn, user=None,
703 703 date=None, extra=None):
704 704 self._repo = repo
705 705 self._rev = None
706 706 self._node = None
707 707 self._text = text
708 708 self._date = date and util.parsedate(date) or util.makedate()
709 709 self._user = user
710 710 parents = [(p or nullid) for p in parents]
711 711 p1, p2 = parents
712 712 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
713 713 files = util.sort(list(files))
714 714 self._status = [files, [], [], [], []]
715 715 self._filectxfn = filectxfn
716 716
717 717 self._extra = extra and extra.copy() or {}
718 718 if 'branch' not in self._extra:
719 719 self._extra['branch'] = 'default'
720 720 elif self._extra.get('branch') == '':
721 721 self._extra['branch'] = 'default'
722 722
723 723 def __str__(self):
724 724 return str(self._parents[0]) + "+"
725 725
726 726 def __int__(self):
727 727 return self._rev
728 728
729 729 def __nonzero__(self):
730 730 return True
731 731
732 732 def user(self): return self._user or self._repo.ui.username()
733 733 def date(self): return self._date
734 734 def description(self): return self._text
735 735 def files(self): return self.modified()
736 736 def modified(self): return self._status[0]
737 737 def added(self): return self._status[1]
738 738 def removed(self): return self._status[2]
739 739 def deleted(self): return self._status[3]
740 740 def unknown(self): return self._status[4]
741 741 def clean(self): return self._status[5]
742 742 def branch(self): return self._extra['branch']
743 743 def extra(self): return self._extra
744 744 def flags(self, f): return self[f].flags()
745 745
746 746 def parents(self):
747 747 """return contexts for each parent changeset"""
748 748 return self._parents
749 749
750 750 def filectx(self, path, filelog=None):
751 751 """get a file context from the working directory"""
752 752 return self._filectxfn(self._repo, self, path)
753 753
754 754 class memfilectx(object):
755 755 """A memfilectx is a subset of filectx supposed to be built by client
756 756 code and passed to commit functions.
757 757 """
758 758 def __init__(self, path, data, islink, isexec, copied):
759 759 """copied is the source file path, or None."""
760 760 self._path = path
761 761 self._data = data
762 762 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
763 763 self._copied = None
764 764 if copied:
765 765 self._copied = (copied, nullid)
766 766
767 767 def __nonzero__(self): return True
768 768 def __str__(self): return "%s@%s" % (self.path(), self._changectx)
769 769 def path(self): return self._path
770 770 def data(self): return self._data
771 771 def flags(self): return self._flags
772 772 def isexec(self): return 'x' in self._flags
773 773 def islink(self): return 'l' in self._flags
774 774 def renamed(self): return self._copied
775 775
@@ -1,603 +1,603 b''
1 1 """
2 2 dirstate.py - working directory tracking for mercurial
3 3
4 4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8 """
9 9
10 10 from node import nullid
11 11 from i18n import _
12 12 import struct, os, bisect, stat, util, errno, ignore
13 13 import cStringIO, osutil, sys
14 14
15 15 _unknown = ('?', 0, 0, 0)
16 16 _format = ">cllll"
17 17
18 18 def _finddirs(path):
19 19 pos = len(path)
20 20 while 1:
21 21 pos = path.rfind('/', 0, pos)
22 22 if pos == -1:
23 23 break
24 24 yield path[:pos]
25 25
26 26 class dirstate(object):
27 27
28 28 def __init__(self, opener, ui, root):
29 29 self._opener = opener
30 30 self._root = root
31 31 self._rootdir = os.path.join(root, '')
32 32 self._dirty = False
33 33 self._dirtypl = False
34 34 self._ui = ui
35 35
36 36 def __getattr__(self, name):
37 37 if name == '_map':
38 38 self._read()
39 39 return self._map
40 40 elif name == '_copymap':
41 41 self._read()
42 42 return self._copymap
43 43 elif name == '_foldmap':
44 44 _foldmap = {}
45 45 for name in self._map:
46 46 norm = os.path.normcase(os.path.normpath(name))
47 47 _foldmap[norm] = name
48 48 self._foldmap = _foldmap
49 49 return self._foldmap
50 50 elif name == '_branch':
51 51 try:
52 52 self._branch = (self._opener("branch").read().strip()
53 53 or "default")
54 54 except IOError:
55 55 self._branch = "default"
56 56 return self._branch
57 57 elif name == '_pl':
58 58 self._pl = [nullid, nullid]
59 59 try:
60 60 st = self._opener("dirstate").read(40)
61 61 if len(st) == 40:
62 62 self._pl = st[:20], st[20:40]
63 63 except IOError, err:
64 64 if err.errno != errno.ENOENT: raise
65 65 return self._pl
66 66 elif name == '_dirs':
67 67 dirs = {}
68 68 for f,s in self._map.items():
69 69 if s[0] != 'r':
70 70 for base in _finddirs(f):
71 71 dirs[base] = dirs.get(base, 0) + 1
72 72 self._dirs = dirs
73 73 return self._dirs
74 74 elif name == '_ignore':
75 75 files = [self._join('.hgignore')]
76 76 for name, path in self._ui.configitems("ui"):
77 77 if name == 'ignore' or name.startswith('ignore.'):
78 78 files.append(os.path.expanduser(path))
79 79 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
80 80 return self._ignore
81 81 elif name == '_slash':
82 82 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
83 83 return self._slash
84 84 elif name == '_checklink':
85 85 self._checklink = util.checklink(self._root)
86 86 return self._checklink
87 87 elif name == '_checkexec':
88 88 self._checkexec = util.checkexec(self._root)
89 89 return self._checkexec
90 90 elif name == '_checkcase':
91 91 self._checkcase = not util.checkcase(self._join('.hg'))
92 92 return self._checkcase
93 93 elif name == 'normalize':
94 94 if self._checkcase:
95 95 self.normalize = self._normalize
96 96 else:
97 97 self.normalize = lambda x: x
98 98 return self.normalize
99 99 else:
100 raise AttributeError, name
100 raise AttributeError(name)
101 101
102 102 def _join(self, f):
103 103 # much faster than os.path.join()
104 104 # it's safe because f is always a relative path
105 105 return self._rootdir + f
106 106
107 107 def flagfunc(self, fallback):
108 108 if self._checklink:
109 109 if self._checkexec:
110 110 def f(x):
111 111 p = self._join(x)
112 112 if os.path.islink(p):
113 113 return 'l'
114 114 if util.is_exec(p):
115 115 return 'x'
116 116 return ''
117 117 return f
118 118 def f(x):
119 119 if os.path.islink(self._join(x)):
120 120 return 'l'
121 121 if 'x' in fallback(x):
122 122 return 'x'
123 123 return ''
124 124 return f
125 125 if self._checkexec:
126 126 def f(x):
127 127 if 'l' in fallback(x):
128 128 return 'l'
129 129 if util.is_exec(self._join(x)):
130 130 return 'x'
131 131 return ''
132 132 return f
133 133 return fallback
134 134
135 135 def getcwd(self):
136 136 cwd = os.getcwd()
137 137 if cwd == self._root: return ''
138 138 # self._root ends with a path separator if self._root is '/' or 'C:\'
139 139 rootsep = self._root
140 140 if not util.endswithsep(rootsep):
141 141 rootsep += os.sep
142 142 if cwd.startswith(rootsep):
143 143 return cwd[len(rootsep):]
144 144 else:
145 145 # we're outside the repo. return an absolute path.
146 146 return cwd
147 147
148 148 def pathto(self, f, cwd=None):
149 149 if cwd is None:
150 150 cwd = self.getcwd()
151 151 path = util.pathto(self._root, cwd, f)
152 152 if self._slash:
153 153 return util.normpath(path)
154 154 return path
155 155
156 156 def __getitem__(self, key):
157 157 ''' current states:
158 158 n normal
159 159 m needs merging
160 160 r marked for removal
161 161 a marked for addition
162 162 ? not tracked'''
163 163 return self._map.get(key, ("?",))[0]
164 164
165 165 def __contains__(self, key):
166 166 return key in self._map
167 167
168 168 def __iter__(self):
169 169 for x in util.sort(self._map):
170 170 yield x
171 171
172 172 def parents(self):
173 173 return self._pl
174 174
175 175 def branch(self):
176 176 return self._branch
177 177
178 178 def setparents(self, p1, p2=nullid):
179 179 self._dirty = self._dirtypl = True
180 180 self._pl = p1, p2
181 181
182 182 def setbranch(self, branch):
183 183 self._branch = branch
184 184 self._opener("branch", "w").write(branch + '\n')
185 185
186 186 def _read(self):
187 187 self._map = {}
188 188 self._copymap = {}
189 189 if not self._dirtypl:
190 190 self._pl = [nullid, nullid]
191 191 try:
192 192 st = self._opener("dirstate").read()
193 193 except IOError, err:
194 194 if err.errno != errno.ENOENT: raise
195 195 return
196 196 if not st:
197 197 return
198 198
199 199 if not self._dirtypl:
200 200 self._pl = [st[:20], st[20: 40]]
201 201
202 202 # deref fields so they will be local in loop
203 203 dmap = self._map
204 204 copymap = self._copymap
205 205 unpack = struct.unpack
206 206 e_size = struct.calcsize(_format)
207 207 pos1 = 40
208 208 l = len(st)
209 209
210 210 # the inner loop
211 211 while pos1 < l:
212 212 pos2 = pos1 + e_size
213 213 e = unpack(">cllll", st[pos1:pos2]) # a literal here is faster
214 214 pos1 = pos2 + e[4]
215 215 f = st[pos2:pos1]
216 216 if '\0' in f:
217 217 f, c = f.split('\0')
218 218 copymap[f] = c
219 219 dmap[f] = e # we hold onto e[4] because making a subtuple is slow
220 220
221 221 def invalidate(self):
222 222 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
223 223 if a in self.__dict__:
224 224 delattr(self, a)
225 225 self._dirty = False
226 226
227 227 def copy(self, source, dest):
228 228 if source == dest:
229 229 return
230 230 self._dirty = True
231 231 self._copymap[dest] = source
232 232
233 233 def copied(self, file):
234 234 return self._copymap.get(file, None)
235 235
236 236 def copies(self):
237 237 return self._copymap
238 238
239 239 def _droppath(self, f):
240 240 if self[f] not in "?r" and "_dirs" in self.__dict__:
241 241 dirs = self._dirs
242 242 for base in _finddirs(f):
243 243 if dirs[base] == 1:
244 244 del dirs[base]
245 245 else:
246 246 dirs[base] -= 1
247 247
248 248 def _addpath(self, f, check=False):
249 249 oldstate = self[f]
250 250 if check or oldstate == "r":
251 251 if '\r' in f or '\n' in f:
252 252 raise util.Abort(
253 253 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
254 254 if f in self._dirs:
255 255 raise util.Abort(_('directory %r already in dirstate') % f)
256 256 # shadows
257 257 for d in _finddirs(f):
258 258 if d in self._dirs:
259 259 break
260 260 if d in self._map and self[d] != 'r':
261 261 raise util.Abort(
262 262 _('file %r in dirstate clashes with %r') % (d, f))
263 263 if oldstate in "?r" and "_dirs" in self.__dict__:
264 264 dirs = self._dirs
265 265 for base in _finddirs(f):
266 266 dirs[base] = dirs.get(base, 0) + 1
267 267
268 268 def normal(self, f):
269 269 'mark a file normal and clean'
270 270 self._dirty = True
271 271 self._addpath(f)
272 272 s = os.lstat(self._join(f))
273 273 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime, 0)
274 274 if f in self._copymap:
275 275 del self._copymap[f]
276 276
277 277 def normallookup(self, f):
278 278 'mark a file normal, but possibly dirty'
279 279 if self._pl[1] != nullid and f in self._map:
280 280 # if there is a merge going on and the file was either
281 281 # in state 'm' or dirty before being removed, restore that state.
282 282 entry = self._map[f]
283 283 if entry[0] == 'r' and entry[2] in (-1, -2):
284 284 source = self._copymap.get(f)
285 285 if entry[2] == -1:
286 286 self.merge(f)
287 287 elif entry[2] == -2:
288 288 self.normaldirty(f)
289 289 if source:
290 290 self.copy(source, f)
291 291 return
292 292 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
293 293 return
294 294 self._dirty = True
295 295 self._addpath(f)
296 296 self._map[f] = ('n', 0, -1, -1, 0)
297 297 if f in self._copymap:
298 298 del self._copymap[f]
299 299
300 300 def normaldirty(self, f):
301 301 'mark a file normal, but dirty'
302 302 self._dirty = True
303 303 self._addpath(f)
304 304 self._map[f] = ('n', 0, -2, -1, 0)
305 305 if f in self._copymap:
306 306 del self._copymap[f]
307 307
308 308 def add(self, f):
309 309 'mark a file added'
310 310 self._dirty = True
311 311 self._addpath(f, True)
312 312 self._map[f] = ('a', 0, -1, -1, 0)
313 313 if f in self._copymap:
314 314 del self._copymap[f]
315 315
316 316 def remove(self, f):
317 317 'mark a file removed'
318 318 self._dirty = True
319 319 self._droppath(f)
320 320 size = 0
321 321 if self._pl[1] != nullid and f in self._map:
322 322 entry = self._map[f]
323 323 if entry[0] == 'm':
324 324 size = -1
325 325 elif entry[0] == 'n' and entry[2] == -2:
326 326 size = -2
327 327 self._map[f] = ('r', 0, size, 0, 0)
328 328 if size == 0 and f in self._copymap:
329 329 del self._copymap[f]
330 330
331 331 def merge(self, f):
332 332 'mark a file merged'
333 333 self._dirty = True
334 334 s = os.lstat(self._join(f))
335 335 self._addpath(f)
336 336 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime, 0)
337 337 if f in self._copymap:
338 338 del self._copymap[f]
339 339
340 340 def forget(self, f):
341 341 'forget a file'
342 342 self._dirty = True
343 343 try:
344 344 self._droppath(f)
345 345 del self._map[f]
346 346 except KeyError:
347 347 self._ui.warn(_("not in dirstate: %s\n") % f)
348 348
349 349 def _normalize(self, path):
350 350 norm_path = os.path.normcase(os.path.normpath(path))
351 351 if norm_path not in self._foldmap:
352 352 if not os.path.exists(os.path.join(self._root, path)):
353 353 return path
354 354 self._foldmap[norm_path] = util.fspath(path, self._root)
355 355 return self._foldmap[norm_path]
356 356
357 357 def clear(self):
358 358 self._map = {}
359 359 if "_dirs" in self.__dict__:
360 360 delattr(self, "_dirs");
361 361 self._copymap = {}
362 362 self._pl = [nullid, nullid]
363 363 self._dirty = True
364 364
365 365 def rebuild(self, parent, files):
366 366 self.clear()
367 367 for f in files:
368 368 if 'x' in files.flags(f):
369 369 self._map[f] = ('n', 0777, -1, 0, 0)
370 370 else:
371 371 self._map[f] = ('n', 0666, -1, 0, 0)
372 372 self._pl = (parent, nullid)
373 373 self._dirty = True
374 374
375 375 def write(self):
376 376 if not self._dirty:
377 377 return
378 378 st = self._opener("dirstate", "w", atomictemp=True)
379 379
380 380 try:
381 381 gran = int(self._ui.config('dirstate', 'granularity', 1))
382 382 except ValueError:
383 383 gran = 1
384 384 limit = sys.maxint
385 385 if gran > 0:
386 386 limit = util.fstat(st).st_mtime - gran
387 387
388 388 cs = cStringIO.StringIO()
389 389 copymap = self._copymap
390 390 pack = struct.pack
391 391 write = cs.write
392 392 write("".join(self._pl))
393 393 for f, e in self._map.iteritems():
394 394 if f in copymap:
395 395 f = "%s\0%s" % (f, copymap[f])
396 396 if e[3] > limit and e[0] == 'n':
397 397 e = (e[0], 0, -1, -1, 0)
398 398 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
399 399 write(e)
400 400 write(f)
401 401 st.write(cs.getvalue())
402 402 st.rename()
403 403 self._dirty = self._dirtypl = False
404 404
405 405 def _dirignore(self, f):
406 406 if f == '.':
407 407 return False
408 408 if self._ignore(f):
409 409 return True
410 410 for p in _finddirs(f):
411 411 if self._ignore(p):
412 412 return True
413 413 return False
414 414
415 415 def walk(self, match, unknown, ignored):
416 416 '''
417 417 walk recursively through the directory tree, finding all files
418 418 matched by the match function
419 419
420 420 results are yielded in a tuple (filename, stat), where stat
421 421 and st is the stat result if the file was found in the directory.
422 422 '''
423 423
424 424 def fwarn(f, msg):
425 425 self._ui.warn('%s: %s\n' % (self.pathto(ff), msg))
426 426 return False
427 427 badfn = fwarn
428 428 if hasattr(match, 'bad'):
429 429 badfn = match.bad
430 430
431 431 def badtype(f, mode):
432 432 kind = 'unknown'
433 433 if stat.S_ISCHR(mode): kind = _('character device')
434 434 elif stat.S_ISBLK(mode): kind = _('block device')
435 435 elif stat.S_ISFIFO(mode): kind = _('fifo')
436 436 elif stat.S_ISSOCK(mode): kind = _('socket')
437 437 elif stat.S_ISDIR(mode): kind = _('directory')
438 438 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
439 439 % (self.pathto(f), kind))
440 440
441 441 ignore = self._ignore
442 442 dirignore = self._dirignore
443 443 if ignored:
444 444 ignore = util.never
445 445 dirignore = util.never
446 446 elif not unknown:
447 447 # if unknown and ignored are False, skip step 2
448 448 ignore = util.always
449 449 dirignore = util.always
450 450
451 451 matchfn = match.matchfn
452 452 dmap = self._map
453 453 normpath = util.normpath
454 454 normalize = self.normalize
455 455 listdir = osutil.listdir
456 456 lstat = os.lstat
457 457 bisect_left = bisect.bisect_left
458 458 pconvert = util.pconvert
459 459 getkind = stat.S_IFMT
460 460 dirkind = stat.S_IFDIR
461 461 regkind = stat.S_IFREG
462 462 lnkkind = stat.S_IFLNK
463 463 join = self._join
464 464 work = []
465 465 wadd = work.append
466 466
467 467 files = util.unique(match.files())
468 468 if not files or '.' in files:
469 469 files = ['']
470 470 results = {'.hg': None}
471 471
472 472 # step 1: find all explicit files
473 473 for ff in util.sort(files):
474 474 nf = normalize(normpath(ff))
475 475 if nf in results:
476 476 continue
477 477
478 478 try:
479 479 st = lstat(join(nf))
480 480 kind = getkind(st.st_mode)
481 481 if kind == dirkind:
482 482 if not dirignore(nf):
483 483 wadd(nf)
484 484 elif kind == regkind or kind == lnkkind:
485 485 results[nf] = st
486 486 else:
487 487 badtype(ff, kind)
488 488 if nf in dmap:
489 489 results[nf] = None
490 490 except OSError, inst:
491 491 keep = False
492 492 prefix = nf + "/"
493 493 for fn in dmap:
494 494 if nf == fn or fn.startswith(prefix):
495 495 keep = True
496 496 break
497 497 if not keep:
498 498 if inst.errno != errno.ENOENT:
499 499 fwarn(ff, inst.strerror)
500 500 elif badfn(ff, inst.strerror):
501 501 if (nf in dmap or not ignore(nf)) and matchfn(nf):
502 502 results[nf] = None
503 503
504 504 # step 2: visit subdirectories
505 505 while work:
506 506 nd = work.pop()
507 507 if hasattr(match, 'dir'):
508 508 match.dir(nd)
509 509 entries = listdir(join(nd), stat=True)
510 510 if nd == '.':
511 511 nd = ''
512 512 else:
513 513 # do not recurse into a repo contained in this
514 514 # one. use bisect to find .hg directory so speed
515 515 # is good on big directory.
516 516 hg = bisect_left(entries, ('.hg'))
517 517 if hg < len(entries) and entries[hg][0] == '.hg' \
518 518 and entries[hg][1] == dirkind:
519 519 continue
520 520 for f, kind, st in entries:
521 521 nf = normalize(nd and (nd + "/" + f) or f)
522 522 if nf not in results:
523 523 if kind == dirkind:
524 524 if not ignore(nf):
525 525 wadd(nf)
526 526 if nf in dmap and matchfn(nf):
527 527 results[nf] = None
528 528 elif kind == regkind or kind == lnkkind:
529 529 if nf in dmap:
530 530 if matchfn(nf):
531 531 results[nf] = st
532 532 elif matchfn(nf) and not ignore(nf):
533 533 results[nf] = st
534 534 elif nf in dmap and matchfn(nf):
535 535 results[nf] = None
536 536
537 537 # step 3: report unseen items in the dmap hash
538 538 visit = [f for f in dmap if f not in results and match(f)]
539 539 for nf in util.sort(visit):
540 540 results[nf] = None
541 541 try:
542 542 st = lstat(join(nf))
543 543 kind = getkind(st.st_mode)
544 544 if kind == regkind or kind == lnkkind:
545 545 results[nf] = st
546 546 except OSError, inst:
547 547 if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
548 548 raise
549 549
550 550 del results['.hg']
551 551 return results
552 552
553 553 def status(self, match, ignored, clean, unknown):
554 554 listignored, listclean, listunknown = ignored, clean, unknown
555 555 lookup, modified, added, unknown, ignored = [], [], [], [], []
556 556 removed, deleted, clean = [], [], []
557 557
558 558 _join = self._join
559 559 lstat = os.lstat
560 560 cmap = self._copymap
561 561 dmap = self._map
562 562 ladd = lookup.append
563 563 madd = modified.append
564 564 aadd = added.append
565 565 uadd = unknown.append
566 566 iadd = ignored.append
567 567 radd = removed.append
568 568 dadd = deleted.append
569 569 cadd = clean.append
570 570
571 571 for fn, st in self.walk(match, listunknown, listignored).iteritems():
572 572 if fn not in dmap:
573 573 if (listignored or match.exact(fn)) and self._dirignore(fn):
574 574 if listignored:
575 575 iadd(fn)
576 576 elif listunknown:
577 577 uadd(fn)
578 578 continue
579 579
580 580 state, mode, size, time, foo = dmap[fn]
581 581
582 582 if not st and state in "nma":
583 583 dadd(fn)
584 584 elif state == 'n':
585 585 if (size >= 0 and
586 586 (size != st.st_size
587 587 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
588 588 or size == -2
589 589 or fn in self._copymap):
590 590 madd(fn)
591 591 elif time != int(st.st_mtime):
592 592 ladd(fn)
593 593 elif listclean:
594 594 cadd(fn)
595 595 elif state == 'm':
596 596 madd(fn)
597 597 elif state == 'a':
598 598 aadd(fn)
599 599 elif state == 'r':
600 600 radd(fn)
601 601
602 602 return (lookup, modified, added, removed, deleted, unknown, ignored,
603 603 clean)
@@ -1,67 +1,67 b''
1 1 # hgweb/wsgicgi.py - CGI->WSGI translator
2 2 #
3 3 # Copyright 2006 Eric Hopper <hopper@omnifarious.org>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7 #
8 8 # This was originally copied from the public domain code at
9 9 # http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
10 10
11 11 import os, sys
12 12 from mercurial import util
13 13
14 14 def launch(application):
15 15 util.set_binary(sys.stdin)
16 16 util.set_binary(sys.stdout)
17 17
18 18 environ = dict(os.environ.items())
19 19 environ.setdefault('PATH_INFO', '')
20 20 environ['wsgi.input'] = sys.stdin
21 21 environ['wsgi.errors'] = sys.stderr
22 22 environ['wsgi.version'] = (1, 0)
23 23 environ['wsgi.multithread'] = False
24 24 environ['wsgi.multiprocess'] = True
25 25 environ['wsgi.run_once'] = True
26 26
27 27 if environ.get('HTTPS','off').lower() in ('on','1','yes'):
28 28 environ['wsgi.url_scheme'] = 'https'
29 29 else:
30 30 environ['wsgi.url_scheme'] = 'http'
31 31
32 32 headers_set = []
33 33 headers_sent = []
34 34 out = sys.stdout
35 35
36 36 def write(data):
37 37 if not headers_set:
38 38 raise AssertionError("write() before start_response()")
39 39
40 40 elif not headers_sent:
41 41 # Before the first output, send the stored headers
42 42 status, response_headers = headers_sent[:] = headers_set
43 43 out.write('Status: %s\r\n' % status)
44 44 for header in response_headers:
45 45 out.write('%s: %s\r\n' % header)
46 46 out.write('\r\n')
47 47
48 48 out.write(data)
49 49 out.flush()
50 50
51 51 def start_response(status, response_headers, exc_info=None):
52 52 if exc_info:
53 53 try:
54 54 if headers_sent:
55 55 # Re-raise original exception if headers sent
56 raise exc_info[0], exc_info[1], exc_info[2]
56 raise exc_info[0](exc_info[1], exc_info[2])
57 57 finally:
58 58 exc_info = None # avoid dangling circular ref
59 59 elif headers_set:
60 60 raise AssertionError("Headers already set!")
61 61
62 62 headers_set[:] = [status, response_headers]
63 63 return write
64 64
65 65 content = application(environ, start_response)
66 66 for chunk in content:
67 67 write(chunk)
@@ -1,2070 +1,2070 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 15 import merge as merge_
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 19 supported = ('revlogv1', 'store')
20 20
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.root = os.path.realpath(path)
24 24 self.path = os.path.join(self.root, ".hg")
25 25 self.origroot = path
26 26 self.opener = util.opener(self.path)
27 27 self.wopener = util.opener(self.root)
28 28
29 29 if not os.path.isdir(self.path):
30 30 if create:
31 31 if not os.path.exists(path):
32 32 os.mkdir(path)
33 33 os.mkdir(self.path)
34 34 requirements = ["revlogv1"]
35 35 if parentui.configbool('format', 'usestore', True):
36 36 os.mkdir(os.path.join(self.path, "store"))
37 37 requirements.append("store")
38 38 # create an invalid changelog
39 39 self.opener("00changelog.i", "a").write(
40 40 '\0\0\0\2' # represents revlogv2
41 41 ' dummy changelog to prevent using the old repo layout'
42 42 )
43 43 reqfile = self.opener("requires", "w")
44 44 for r in requirements:
45 45 reqfile.write("%s\n" % r)
46 46 reqfile.close()
47 47 else:
48 48 raise repo.RepoError(_("repository %s not found") % path)
49 49 elif create:
50 50 raise repo.RepoError(_("repository %s already exists") % path)
51 51 else:
52 52 # find requirements
53 53 requirements = []
54 54 try:
55 55 requirements = self.opener("requires").read().splitlines()
56 56 for r in requirements:
57 57 if r not in self.supported:
58 58 raise repo.RepoError(_("requirement '%s' not supported") % r)
59 59 except IOError, inst:
60 60 if inst.errno != errno.ENOENT:
61 61 raise
62 62
63 63 self.store = store.store(requirements, self.path, util.opener)
64 64 self.spath = self.store.path
65 65 self.sopener = self.store.opener
66 66 self.sjoin = self.store.join
67 67 self.opener.createmode = self.store.createmode
68 68
69 69 self.ui = ui.ui(parentui=parentui)
70 70 try:
71 71 self.ui.readconfig(self.join("hgrc"), self.root)
72 72 extensions.loadall(self.ui)
73 73 except IOError:
74 74 pass
75 75
76 76 self.tagscache = None
77 77 self._tagstypecache = None
78 78 self.branchcache = None
79 79 self._ubranchcache = None # UTF-8 version of branchcache
80 80 self._branchcachetip = None
81 81 self.nodetagscache = None
82 82 self.filterpats = {}
83 83 self._datafilters = {}
84 84 self._transref = self._lockref = self._wlockref = None
85 85
86 86 def __getattr__(self, name):
87 87 if name == 'changelog':
88 88 self.changelog = changelog.changelog(self.sopener)
89 89 self.sopener.defversion = self.changelog.version
90 90 return self.changelog
91 91 if name == 'manifest':
92 92 self.changelog
93 93 self.manifest = manifest.manifest(self.sopener)
94 94 return self.manifest
95 95 if name == 'dirstate':
96 96 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
97 97 return self.dirstate
98 98 else:
99 raise AttributeError, name
99 raise AttributeError(name)
100 100
101 101 def __getitem__(self, changeid):
102 102 if changeid == None:
103 103 return context.workingctx(self)
104 104 return context.changectx(self, changeid)
105 105
106 106 def __nonzero__(self):
107 107 return True
108 108
109 109 def __len__(self):
110 110 return len(self.changelog)
111 111
112 112 def __iter__(self):
113 113 for i in xrange(len(self)):
114 114 yield i
115 115
116 116 def url(self):
117 117 return 'file:' + self.root
118 118
119 119 def hook(self, name, throw=False, **args):
120 120 return hook.hook(self.ui, self, name, throw, **args)
121 121
122 122 tag_disallowed = ':\r\n'
123 123
124 124 def _tag(self, names, node, message, local, user, date, parent=None,
125 125 extra={}):
126 126 use_dirstate = parent is None
127 127
128 128 if isinstance(names, str):
129 129 allchars = names
130 130 names = (names,)
131 131 else:
132 132 allchars = ''.join(names)
133 133 for c in self.tag_disallowed:
134 134 if c in allchars:
135 135 raise util.Abort(_('%r cannot be used in a tag name') % c)
136 136
137 137 for name in names:
138 138 self.hook('pretag', throw=True, node=hex(node), tag=name,
139 139 local=local)
140 140
141 141 def writetags(fp, names, munge, prevtags):
142 142 fp.seek(0, 2)
143 143 if prevtags and prevtags[-1] != '\n':
144 144 fp.write('\n')
145 145 for name in names:
146 146 m = munge and munge(name) or name
147 147 if self._tagstypecache and name in self._tagstypecache:
148 148 old = self.tagscache.get(name, nullid)
149 149 fp.write('%s %s\n' % (hex(old), m))
150 150 fp.write('%s %s\n' % (hex(node), m))
151 151 fp.close()
152 152
153 153 prevtags = ''
154 154 if local:
155 155 try:
156 156 fp = self.opener('localtags', 'r+')
157 157 except IOError, err:
158 158 fp = self.opener('localtags', 'a')
159 159 else:
160 160 prevtags = fp.read()
161 161
162 162 # local tags are stored in the current charset
163 163 writetags(fp, names, None, prevtags)
164 164 for name in names:
165 165 self.hook('tag', node=hex(node), tag=name, local=local)
166 166 return
167 167
168 168 if use_dirstate:
169 169 try:
170 170 fp = self.wfile('.hgtags', 'rb+')
171 171 except IOError, err:
172 172 fp = self.wfile('.hgtags', 'ab')
173 173 else:
174 174 prevtags = fp.read()
175 175 else:
176 176 try:
177 177 prevtags = self.filectx('.hgtags', parent).data()
178 178 except revlog.LookupError:
179 179 pass
180 180 fp = self.wfile('.hgtags', 'wb')
181 181 if prevtags:
182 182 fp.write(prevtags)
183 183
184 184 # committed tags are stored in UTF-8
185 185 writetags(fp, names, util.fromlocal, prevtags)
186 186
187 187 if use_dirstate and '.hgtags' not in self.dirstate:
188 188 self.add(['.hgtags'])
189 189
190 190 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
191 191 extra=extra)
192 192
193 193 for name in names:
194 194 self.hook('tag', node=hex(node), tag=name, local=local)
195 195
196 196 return tagnode
197 197
198 198 def tag(self, names, node, message, local, user, date):
199 199 '''tag a revision with one or more symbolic names.
200 200
201 201 names is a list of strings or, when adding a single tag, names may be a
202 202 string.
203 203
204 204 if local is True, the tags are stored in a per-repository file.
205 205 otherwise, they are stored in the .hgtags file, and a new
206 206 changeset is committed with the change.
207 207
208 208 keyword arguments:
209 209
210 210 local: whether to store tags in non-version-controlled file
211 211 (default False)
212 212
213 213 message: commit message to use if committing
214 214
215 215 user: name of user to use if committing
216 216
217 217 date: date tuple to use if committing'''
218 218
219 219 for x in self.status()[:5]:
220 220 if '.hgtags' in x:
221 221 raise util.Abort(_('working copy of .hgtags is changed '
222 222 '(please commit .hgtags manually)'))
223 223
224 224 self._tag(names, node, message, local, user, date)
225 225
226 226 def tags(self):
227 227 '''return a mapping of tag to node'''
228 228 if self.tagscache:
229 229 return self.tagscache
230 230
231 231 globaltags = {}
232 232 tagtypes = {}
233 233
234 234 def readtags(lines, fn, tagtype):
235 235 filetags = {}
236 236 count = 0
237 237
238 238 def warn(msg):
239 239 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
240 240
241 241 for l in lines:
242 242 count += 1
243 243 if not l:
244 244 continue
245 245 s = l.split(" ", 1)
246 246 if len(s) != 2:
247 247 warn(_("cannot parse entry"))
248 248 continue
249 249 node, key = s
250 250 key = util.tolocal(key.strip()) # stored in UTF-8
251 251 try:
252 252 bin_n = bin(node)
253 253 except TypeError:
254 254 warn(_("node '%s' is not well formed") % node)
255 255 continue
256 256 if bin_n not in self.changelog.nodemap:
257 257 warn(_("tag '%s' refers to unknown node") % key)
258 258 continue
259 259
260 260 h = []
261 261 if key in filetags:
262 262 n, h = filetags[key]
263 263 h.append(n)
264 264 filetags[key] = (bin_n, h)
265 265
266 266 for k, nh in filetags.items():
267 267 if k not in globaltags:
268 268 globaltags[k] = nh
269 269 tagtypes[k] = tagtype
270 270 continue
271 271
272 272 # we prefer the global tag if:
273 273 # it supercedes us OR
274 274 # mutual supercedes and it has a higher rank
275 275 # otherwise we win because we're tip-most
276 276 an, ah = nh
277 277 bn, bh = globaltags[k]
278 278 if (bn != an and an in bh and
279 279 (bn not in ah or len(bh) > len(ah))):
280 280 an = bn
281 281 ah.extend([n for n in bh if n not in ah])
282 282 globaltags[k] = an, ah
283 283 tagtypes[k] = tagtype
284 284
285 285 # read the tags file from each head, ending with the tip
286 286 f = None
287 287 for rev, node, fnode in self._hgtagsnodes():
288 288 f = (f and f.filectx(fnode) or
289 289 self.filectx('.hgtags', fileid=fnode))
290 290 readtags(f.data().splitlines(), f, "global")
291 291
292 292 try:
293 293 data = util.fromlocal(self.opener("localtags").read())
294 294 # localtags are stored in the local character set
295 295 # while the internal tag table is stored in UTF-8
296 296 readtags(data.splitlines(), "localtags", "local")
297 297 except IOError:
298 298 pass
299 299
300 300 self.tagscache = {}
301 301 self._tagstypecache = {}
302 302 for k,nh in globaltags.items():
303 303 n = nh[0]
304 304 if n != nullid:
305 305 self.tagscache[k] = n
306 306 self._tagstypecache[k] = tagtypes[k]
307 307 self.tagscache['tip'] = self.changelog.tip()
308 308 return self.tagscache
309 309
310 310 def tagtype(self, tagname):
311 311 '''
312 312 return the type of the given tag. result can be:
313 313
314 314 'local' : a local tag
315 315 'global' : a global tag
316 316 None : tag does not exist
317 317 '''
318 318
319 319 self.tags()
320 320
321 321 return self._tagstypecache.get(tagname)
322 322
323 323 def _hgtagsnodes(self):
324 324 heads = self.heads()
325 325 heads.reverse()
326 326 last = {}
327 327 ret = []
328 328 for node in heads:
329 329 c = self[node]
330 330 rev = c.rev()
331 331 try:
332 332 fnode = c.filenode('.hgtags')
333 333 except revlog.LookupError:
334 334 continue
335 335 ret.append((rev, node, fnode))
336 336 if fnode in last:
337 337 ret[last[fnode]] = None
338 338 last[fnode] = len(ret) - 1
339 339 return [item for item in ret if item]
340 340
341 341 def tagslist(self):
342 342 '''return a list of tags ordered by revision'''
343 343 l = []
344 344 for t, n in self.tags().items():
345 345 try:
346 346 r = self.changelog.rev(n)
347 347 except:
348 348 r = -2 # sort to the beginning of the list if unknown
349 349 l.append((r, t, n))
350 350 return [(t, n) for r, t, n in util.sort(l)]
351 351
352 352 def nodetags(self, node):
353 353 '''return the tags associated with a node'''
354 354 if not self.nodetagscache:
355 355 self.nodetagscache = {}
356 356 for t, n in self.tags().items():
357 357 self.nodetagscache.setdefault(n, []).append(t)
358 358 return self.nodetagscache.get(node, [])
359 359
360 360 def _branchtags(self, partial, lrev):
361 361 tiprev = len(self) - 1
362 362 if lrev != tiprev:
363 363 self._updatebranchcache(partial, lrev+1, tiprev+1)
364 364 self._writebranchcache(partial, self.changelog.tip(), tiprev)
365 365
366 366 return partial
367 367
368 368 def branchtags(self):
369 369 tip = self.changelog.tip()
370 370 if self.branchcache is not None and self._branchcachetip == tip:
371 371 return self.branchcache
372 372
373 373 oldtip = self._branchcachetip
374 374 self._branchcachetip = tip
375 375 if self.branchcache is None:
376 376 self.branchcache = {} # avoid recursion in changectx
377 377 else:
378 378 self.branchcache.clear() # keep using the same dict
379 379 if oldtip is None or oldtip not in self.changelog.nodemap:
380 380 partial, last, lrev = self._readbranchcache()
381 381 else:
382 382 lrev = self.changelog.rev(oldtip)
383 383 partial = self._ubranchcache
384 384
385 385 self._branchtags(partial, lrev)
386 386
387 387 # the branch cache is stored on disk as UTF-8, but in the local
388 388 # charset internally
389 389 for k, v in partial.items():
390 390 self.branchcache[util.tolocal(k)] = v
391 391 self._ubranchcache = partial
392 392 return self.branchcache
393 393
394 394 def _readbranchcache(self):
395 395 partial = {}
396 396 try:
397 397 f = self.opener("branch.cache")
398 398 lines = f.read().split('\n')
399 399 f.close()
400 400 except (IOError, OSError):
401 401 return {}, nullid, nullrev
402 402
403 403 try:
404 404 last, lrev = lines.pop(0).split(" ", 1)
405 405 last, lrev = bin(last), int(lrev)
406 406 if lrev >= len(self) or self[lrev].node() != last:
407 407 # invalidate the cache
408 408 raise ValueError('invalidating branch cache (tip differs)')
409 409 for l in lines:
410 410 if not l: continue
411 411 node, label = l.split(" ", 1)
412 412 partial[label.strip()] = bin(node)
413 413 except (KeyboardInterrupt, util.SignalInterrupt):
414 414 raise
415 415 except Exception, inst:
416 416 if self.ui.debugflag:
417 417 self.ui.warn(str(inst), '\n')
418 418 partial, last, lrev = {}, nullid, nullrev
419 419 return partial, last, lrev
420 420
421 421 def _writebranchcache(self, branches, tip, tiprev):
422 422 try:
423 423 f = self.opener("branch.cache", "w", atomictemp=True)
424 424 f.write("%s %s\n" % (hex(tip), tiprev))
425 425 for label, node in branches.iteritems():
426 426 f.write("%s %s\n" % (hex(node), label))
427 427 f.rename()
428 428 except (IOError, OSError):
429 429 pass
430 430
431 431 def _updatebranchcache(self, partial, start, end):
432 432 for r in xrange(start, end):
433 433 c = self[r]
434 434 b = c.branch()
435 435 partial[b] = c.node()
436 436
437 437 def lookup(self, key):
438 438 if key == '.':
439 439 return self.dirstate.parents()[0]
440 440 elif key == 'null':
441 441 return nullid
442 442 n = self.changelog._match(key)
443 443 if n:
444 444 return n
445 445 if key in self.tags():
446 446 return self.tags()[key]
447 447 if key in self.branchtags():
448 448 return self.branchtags()[key]
449 449 n = self.changelog._partialmatch(key)
450 450 if n:
451 451 return n
452 452 try:
453 453 if len(key) == 20:
454 454 key = hex(key)
455 455 except:
456 456 pass
457 457 raise repo.RepoError(_("unknown revision '%s'") % key)
458 458
459 459 def local(self):
460 460 return True
461 461
462 462 def join(self, f):
463 463 return os.path.join(self.path, f)
464 464
465 465 def wjoin(self, f):
466 466 return os.path.join(self.root, f)
467 467
468 468 def rjoin(self, f):
469 469 return os.path.join(self.root, util.pconvert(f))
470 470
471 471 def file(self, f):
472 472 if f[0] == '/':
473 473 f = f[1:]
474 474 return filelog.filelog(self.sopener, f)
475 475
476 476 def changectx(self, changeid):
477 477 return self[changeid]
478 478
479 479 def parents(self, changeid=None):
480 480 '''get list of changectxs for parents of changeid'''
481 481 return self[changeid].parents()
482 482
483 483 def filectx(self, path, changeid=None, fileid=None):
484 484 """changeid can be a changeset revision, node, or tag.
485 485 fileid can be a file revision or node."""
486 486 return context.filectx(self, path, changeid, fileid)
487 487
488 488 def getcwd(self):
489 489 return self.dirstate.getcwd()
490 490
491 491 def pathto(self, f, cwd=None):
492 492 return self.dirstate.pathto(f, cwd)
493 493
494 494 def wfile(self, f, mode='r'):
495 495 return self.wopener(f, mode)
496 496
497 497 def _link(self, f):
498 498 return os.path.islink(self.wjoin(f))
499 499
500 500 def _filter(self, filter, filename, data):
501 501 if filter not in self.filterpats:
502 502 l = []
503 503 for pat, cmd in self.ui.configitems(filter):
504 504 mf = util.matcher(self.root, "", [pat], [], [])[1]
505 505 fn = None
506 506 params = cmd
507 507 for name, filterfn in self._datafilters.iteritems():
508 508 if cmd.startswith(name):
509 509 fn = filterfn
510 510 params = cmd[len(name):].lstrip()
511 511 break
512 512 if not fn:
513 513 fn = lambda s, c, **kwargs: util.filter(s, c)
514 514 # Wrap old filters not supporting keyword arguments
515 515 if not inspect.getargspec(fn)[2]:
516 516 oldfn = fn
517 517 fn = lambda s, c, **kwargs: oldfn(s, c)
518 518 l.append((mf, fn, params))
519 519 self.filterpats[filter] = l
520 520
521 521 for mf, fn, cmd in self.filterpats[filter]:
522 522 if mf(filename):
523 523 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
524 524 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
525 525 break
526 526
527 527 return data
528 528
529 529 def adddatafilter(self, name, filter):
530 530 self._datafilters[name] = filter
531 531
532 532 def wread(self, filename):
533 533 if self._link(filename):
534 534 data = os.readlink(self.wjoin(filename))
535 535 else:
536 536 data = self.wopener(filename, 'r').read()
537 537 return self._filter("encode", filename, data)
538 538
539 539 def wwrite(self, filename, data, flags):
540 540 data = self._filter("decode", filename, data)
541 541 try:
542 542 os.unlink(self.wjoin(filename))
543 543 except OSError:
544 544 pass
545 545 if 'l' in flags:
546 546 self.wopener.symlink(data, filename)
547 547 else:
548 548 self.wopener(filename, 'w').write(data)
549 549 if 'x' in flags:
550 550 util.set_flags(self.wjoin(filename), False, True)
551 551
552 552 def wwritedata(self, filename, data):
553 553 return self._filter("decode", filename, data)
554 554
555 555 def transaction(self):
556 556 if self._transref and self._transref():
557 557 return self._transref().nest()
558 558
559 559 # abort here if the journal already exists
560 560 if os.path.exists(self.sjoin("journal")):
561 561 raise repo.RepoError(_("journal already exists - run hg recover"))
562 562
563 563 # save dirstate for rollback
564 564 try:
565 565 ds = self.opener("dirstate").read()
566 566 except IOError:
567 567 ds = ""
568 568 self.opener("journal.dirstate", "w").write(ds)
569 569 self.opener("journal.branch", "w").write(self.dirstate.branch())
570 570
571 571 renames = [(self.sjoin("journal"), self.sjoin("undo")),
572 572 (self.join("journal.dirstate"), self.join("undo.dirstate")),
573 573 (self.join("journal.branch"), self.join("undo.branch"))]
574 574 tr = transaction.transaction(self.ui.warn, self.sopener,
575 575 self.sjoin("journal"),
576 576 aftertrans(renames),
577 577 self.store.createmode)
578 578 self._transref = weakref.ref(tr)
579 579 return tr
580 580
581 581 def recover(self):
582 582 l = self.lock()
583 583 try:
584 584 if os.path.exists(self.sjoin("journal")):
585 585 self.ui.status(_("rolling back interrupted transaction\n"))
586 586 transaction.rollback(self.sopener, self.sjoin("journal"))
587 587 self.invalidate()
588 588 return True
589 589 else:
590 590 self.ui.warn(_("no interrupted transaction available\n"))
591 591 return False
592 592 finally:
593 593 del l
594 594
595 595 def rollback(self):
596 596 wlock = lock = None
597 597 try:
598 598 wlock = self.wlock()
599 599 lock = self.lock()
600 600 if os.path.exists(self.sjoin("undo")):
601 601 self.ui.status(_("rolling back last transaction\n"))
602 602 transaction.rollback(self.sopener, self.sjoin("undo"))
603 603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 604 try:
605 605 branch = self.opener("undo.branch").read()
606 606 self.dirstate.setbranch(branch)
607 607 except IOError:
608 608 self.ui.warn(_("Named branch could not be reset, "
609 609 "current branch still is: %s\n")
610 610 % util.tolocal(self.dirstate.branch()))
611 611 self.invalidate()
612 612 self.dirstate.invalidate()
613 613 else:
614 614 self.ui.warn(_("no rollback information available\n"))
615 615 finally:
616 616 del lock, wlock
617 617
618 618 def invalidate(self):
619 619 for a in "changelog manifest".split():
620 620 if a in self.__dict__:
621 621 delattr(self, a)
622 622 self.tagscache = None
623 623 self._tagstypecache = None
624 624 self.nodetagscache = None
625 625 self.branchcache = None
626 626 self._ubranchcache = None
627 627 self._branchcachetip = None
628 628
629 629 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
630 630 try:
631 631 l = lock.lock(lockname, 0, releasefn, desc=desc)
632 632 except lock.LockHeld, inst:
633 633 if not wait:
634 634 raise
635 635 self.ui.warn(_("waiting for lock on %s held by %r\n") %
636 636 (desc, inst.locker))
637 637 # default to 600 seconds timeout
638 638 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
639 639 releasefn, desc=desc)
640 640 if acquirefn:
641 641 acquirefn()
642 642 return l
643 643
644 644 def lock(self, wait=True):
645 645 if self._lockref and self._lockref():
646 646 return self._lockref()
647 647
648 648 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
649 649 _('repository %s') % self.origroot)
650 650 self._lockref = weakref.ref(l)
651 651 return l
652 652
653 653 def wlock(self, wait=True):
654 654 if self._wlockref and self._wlockref():
655 655 return self._wlockref()
656 656
657 657 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
658 658 self.dirstate.invalidate, _('working directory of %s') %
659 659 self.origroot)
660 660 self._wlockref = weakref.ref(l)
661 661 return l
662 662
663 663 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
664 664 """
665 665 commit an individual file as part of a larger transaction
666 666 """
667 667
668 668 fn = fctx.path()
669 669 t = fctx.data()
670 670 fl = self.file(fn)
671 671 fp1 = manifest1.get(fn, nullid)
672 672 fp2 = manifest2.get(fn, nullid)
673 673
674 674 meta = {}
675 675 cp = fctx.renamed()
676 676 if cp and cp[0] != fn:
677 677 # Mark the new revision of this file as a copy of another
678 678 # file. This copy data will effectively act as a parent
679 679 # of this new revision. If this is a merge, the first
680 680 # parent will be the nullid (meaning "look up the copy data")
681 681 # and the second one will be the other parent. For example:
682 682 #
683 683 # 0 --- 1 --- 3 rev1 changes file foo
684 684 # \ / rev2 renames foo to bar and changes it
685 685 # \- 2 -/ rev3 should have bar with all changes and
686 686 # should record that bar descends from
687 687 # bar in rev2 and foo in rev1
688 688 #
689 689 # this allows this merge to succeed:
690 690 #
691 691 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
692 692 # \ / merging rev3 and rev4 should use bar@rev2
693 693 # \- 2 --- 4 as the merge base
694 694 #
695 695
696 696 cf = cp[0]
697 697 cr = manifest1.get(cf)
698 698 nfp = fp2
699 699
700 700 if manifest2: # branch merge
701 701 if fp2 == nullid: # copied on remote side
702 702 if fp1 != nullid or cf in manifest2:
703 703 cr = manifest2[cf]
704 704 nfp = fp1
705 705
706 706 # find source in nearest ancestor if we've lost track
707 707 if not cr:
708 708 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
709 709 (fn, cf))
710 710 for a in self['.'].ancestors():
711 711 if cf in a:
712 712 cr = a[cf].filenode()
713 713 break
714 714
715 715 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
716 716 meta["copy"] = cf
717 717 meta["copyrev"] = hex(cr)
718 718 fp1, fp2 = nullid, nfp
719 719 elif fp2 != nullid:
720 720 # is one parent an ancestor of the other?
721 721 fpa = fl.ancestor(fp1, fp2)
722 722 if fpa == fp1:
723 723 fp1, fp2 = fp2, nullid
724 724 elif fpa == fp2:
725 725 fp2 = nullid
726 726
727 727 # is the file unmodified from the parent? report existing entry
728 728 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
729 729 return fp1
730 730
731 731 changelist.append(fn)
732 732 return fl.add(t, meta, tr, linkrev, fp1, fp2)
733 733
734 734 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
735 735 if p1 is None:
736 736 p1, p2 = self.dirstate.parents()
737 737 return self.commit(files=files, text=text, user=user, date=date,
738 738 p1=p1, p2=p2, extra=extra, empty_ok=True)
739 739
740 740 def commit(self, files=None, text="", user=None, date=None,
741 741 match=None, force=False, force_editor=False,
742 742 p1=None, p2=None, extra={}, empty_ok=False):
743 743 wlock = lock = None
744 744 if files:
745 745 files = util.unique(files)
746 746 try:
747 747 wlock = self.wlock()
748 748 lock = self.lock()
749 749 use_dirstate = (p1 is None) # not rawcommit
750 750
751 751 if use_dirstate:
752 752 p1, p2 = self.dirstate.parents()
753 753 update_dirstate = True
754 754
755 755 if (not force and p2 != nullid and
756 756 (match and (match.files() or match.anypats()))):
757 757 raise util.Abort(_('cannot partially commit a merge '
758 758 '(do not specify files or patterns)'))
759 759
760 760 if files:
761 761 modified, removed = [], []
762 762 for f in files:
763 763 s = self.dirstate[f]
764 764 if s in 'nma':
765 765 modified.append(f)
766 766 elif s == 'r':
767 767 removed.append(f)
768 768 else:
769 769 self.ui.warn(_("%s not tracked!\n") % f)
770 770 changes = [modified, [], removed, [], []]
771 771 else:
772 772 changes = self.status(match=match)
773 773 else:
774 774 p1, p2 = p1, p2 or nullid
775 775 update_dirstate = (self.dirstate.parents()[0] == p1)
776 776 changes = [files, [], [], [], []]
777 777
778 778 ms = merge_.mergestate(self)
779 779 for f in changes[0]:
780 780 if f in ms and ms[f] == 'u':
781 781 raise util.Abort(_("unresolved merge conflicts "
782 782 "(see hg resolve)"))
783 783 wctx = context.workingctx(self, (p1, p2), text, user, date,
784 784 extra, changes)
785 785 return self._commitctx(wctx, force, force_editor, empty_ok,
786 786 use_dirstate, update_dirstate)
787 787 finally:
788 788 del lock, wlock
789 789
790 790 def commitctx(self, ctx):
791 791 wlock = lock = None
792 792 try:
793 793 wlock = self.wlock()
794 794 lock = self.lock()
795 795 return self._commitctx(ctx, force=True, force_editor=False,
796 796 empty_ok=True, use_dirstate=False,
797 797 update_dirstate=False)
798 798 finally:
799 799 del lock, wlock
800 800
801 801 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
802 802 use_dirstate=True, update_dirstate=True):
803 803 tr = None
804 804 valid = 0 # don't save the dirstate if this isn't set
805 805 try:
806 806 commit = util.sort(wctx.modified() + wctx.added())
807 807 remove = wctx.removed()
808 808 extra = wctx.extra().copy()
809 809 branchname = extra['branch']
810 810 user = wctx.user()
811 811 text = wctx.description()
812 812
813 813 p1, p2 = [p.node() for p in wctx.parents()]
814 814 c1 = self.changelog.read(p1)
815 815 c2 = self.changelog.read(p2)
816 816 m1 = self.manifest.read(c1[0]).copy()
817 817 m2 = self.manifest.read(c2[0])
818 818
819 819 if use_dirstate:
820 820 oldname = c1[5].get("branch") # stored in UTF-8
821 821 if (not commit and not remove and not force and p2 == nullid
822 822 and branchname == oldname):
823 823 self.ui.status(_("nothing changed\n"))
824 824 return None
825 825
826 826 xp1 = hex(p1)
827 827 if p2 == nullid: xp2 = ''
828 828 else: xp2 = hex(p2)
829 829
830 830 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
831 831
832 832 tr = self.transaction()
833 833 trp = weakref.proxy(tr)
834 834
835 835 # check in files
836 836 new = {}
837 837 changed = []
838 838 linkrev = len(self)
839 839 for f in commit:
840 840 self.ui.note(f + "\n")
841 841 try:
842 842 fctx = wctx.filectx(f)
843 843 newflags = fctx.flags()
844 844 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
845 845 if ((not changed or changed[-1] != f) and
846 846 m2.get(f) != new[f]):
847 847 # mention the file in the changelog if some
848 848 # flag changed, even if there was no content
849 849 # change.
850 850 if m1.flags(f) != newflags:
851 851 changed.append(f)
852 852 m1.set(f, newflags)
853 853 if use_dirstate:
854 854 self.dirstate.normal(f)
855 855
856 856 except (OSError, IOError):
857 857 if use_dirstate:
858 858 self.ui.warn(_("trouble committing %s!\n") % f)
859 859 raise
860 860 else:
861 861 remove.append(f)
862 862
863 863 # update manifest
864 864 m1.update(new)
865 865 removed = []
866 866
867 867 for f in util.sort(remove):
868 868 if f in m1:
869 869 del m1[f]
870 870 removed.append(f)
871 871 elif f in m2:
872 872 removed.append(f)
873 873 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
874 874 (new, removed))
875 875
876 876 # add changeset
877 877 if (not empty_ok and not text) or force_editor:
878 878 edittext = []
879 879 if text:
880 880 edittext.append(text)
881 881 edittext.append("")
882 882 edittext.append("") # Empty line between message and comments.
883 883 edittext.append(_("HG: Enter commit message."
884 884 " Lines beginning with 'HG:' are removed."))
885 885 edittext.append("HG: --")
886 886 edittext.append("HG: user: %s" % user)
887 887 if p2 != nullid:
888 888 edittext.append("HG: branch merge")
889 889 if branchname:
890 890 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
891 891 edittext.extend(["HG: changed %s" % f for f in changed])
892 892 edittext.extend(["HG: removed %s" % f for f in removed])
893 893 if not changed and not remove:
894 894 edittext.append("HG: no files changed")
895 895 edittext.append("")
896 896 # run editor in the repository root
897 897 olddir = os.getcwd()
898 898 os.chdir(self.root)
899 899 text = self.ui.edit("\n".join(edittext), user)
900 900 os.chdir(olddir)
901 901
902 902 lines = [line.rstrip() for line in text.rstrip().splitlines()]
903 903 while lines and not lines[0]:
904 904 del lines[0]
905 905 if not lines and use_dirstate:
906 906 raise util.Abort(_("empty commit message"))
907 907 text = '\n'.join(lines)
908 908
909 909 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
910 910 user, wctx.date(), extra)
911 911 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
912 912 parent2=xp2)
913 913 tr.close()
914 914
915 915 if self.branchcache:
916 916 self.branchtags()
917 917
918 918 if use_dirstate or update_dirstate:
919 919 self.dirstate.setparents(n)
920 920 if use_dirstate:
921 921 for f in removed:
922 922 self.dirstate.forget(f)
923 923 valid = 1 # our dirstate updates are complete
924 924
925 925 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
926 926 return n
927 927 finally:
928 928 if not valid: # don't save our updated dirstate
929 929 self.dirstate.invalidate()
930 930 del tr
931 931
932 932 def walk(self, match, node=None):
933 933 '''
934 934 walk recursively through the directory tree or a given
935 935 changeset, finding all files matched by the match
936 936 function
937 937 '''
938 938 return self[node].walk(match)
939 939
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean)
        """

        def mfmatches(ctx):
            # restrict a context's manifest to the files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]
        working = ctx2 == self[None]
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # best effort: failing to take the lock just
                            # means we skip refreshing the dirstate
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1027 1027
1028 1028 def add(self, list):
1029 1029 wlock = self.wlock()
1030 1030 try:
1031 1031 rejected = []
1032 1032 for f in list:
1033 1033 p = self.wjoin(f)
1034 1034 try:
1035 1035 st = os.lstat(p)
1036 1036 except:
1037 1037 self.ui.warn(_("%s does not exist!\n") % f)
1038 1038 rejected.append(f)
1039 1039 continue
1040 1040 if st.st_size > 10000000:
1041 1041 self.ui.warn(_("%s: files over 10MB may cause memory and"
1042 1042 " performance problems\n"
1043 1043 "(use 'hg revert %s' to unadd the file)\n")
1044 1044 % (f, f))
1045 1045 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1046 1046 self.ui.warn(_("%s not added: only files and symlinks "
1047 1047 "supported currently\n") % f)
1048 1048 rejected.append(p)
1049 1049 elif self.dirstate[f] in 'amn':
1050 1050 self.ui.warn(_("%s already tracked!\n") % f)
1051 1051 elif self.dirstate[f] == 'r':
1052 1052 self.dirstate.normallookup(f)
1053 1053 else:
1054 1054 self.dirstate.add(f)
1055 1055 return rejected
1056 1056 finally:
1057 1057 del wlock
1058 1058
1059 1059 def forget(self, list):
1060 1060 wlock = self.wlock()
1061 1061 try:
1062 1062 for f in list:
1063 1063 if self.dirstate[f] != 'a':
1064 1064 self.ui.warn(_("%s not added!\n") % f)
1065 1065 else:
1066 1066 self.dirstate.forget(f)
1067 1067 finally:
1068 1068 del wlock
1069 1069
1070 1070 def remove(self, list, unlink=False):
1071 1071 wlock = None
1072 1072 try:
1073 1073 if unlink:
1074 1074 for f in list:
1075 1075 try:
1076 1076 util.unlink(self.wjoin(f))
1077 1077 except OSError, inst:
1078 1078 if inst.errno != errno.ENOENT:
1079 1079 raise
1080 1080 wlock = self.wlock()
1081 1081 for f in list:
1082 1082 if unlink and os.path.exists(self.wjoin(f)):
1083 1083 self.ui.warn(_("%s still exists!\n") % f)
1084 1084 elif self.dirstate[f] == 'a':
1085 1085 self.dirstate.forget(f)
1086 1086 elif f not in self.dirstate:
1087 1087 self.ui.warn(_("%s not tracked!\n") % f)
1088 1088 else:
1089 1089 self.dirstate.remove(f)
1090 1090 finally:
1091 1091 del wlock
1092 1092
1093 1093 def undelete(self, list):
1094 1094 wlock = None
1095 1095 try:
1096 1096 manifests = [self.manifest.read(self.changelog.read(p)[0])
1097 1097 for p in self.dirstate.parents() if p != nullid]
1098 1098 wlock = self.wlock()
1099 1099 for f in list:
1100 1100 if self.dirstate[f] != 'r':
1101 1101 self.ui.warn(_("%s not removed!\n") % f)
1102 1102 else:
1103 1103 m = f in manifests[0] and manifests[0] or manifests[1]
1104 1104 t = self.file(f).read(m[f])
1105 1105 self.wwrite(f, t, m.flags(f))
1106 1106 self.dirstate.normal(f)
1107 1107 finally:
1108 1108 del wlock
1109 1109
1110 1110 def copy(self, source, dest):
1111 1111 wlock = None
1112 1112 try:
1113 1113 p = self.wjoin(dest)
1114 1114 if not (os.path.exists(p) or os.path.islink(p)):
1115 1115 self.ui.warn(_("%s does not exist!\n") % dest)
1116 1116 elif not (os.path.isfile(p) or os.path.islink(p)):
1117 1117 self.ui.warn(_("copy failed: %s is not a file or a "
1118 1118 "symbolic link\n") % dest)
1119 1119 else:
1120 1120 wlock = self.wlock()
1121 1121 if dest not in self.dirstate:
1122 1122 self.dirstate.add(dest)
1123 1123 self.dirstate.copy(source, dest)
1124 1124 finally:
1125 1125 del wlock
1126 1126
1127 1127 def heads(self, start=None):
1128 1128 heads = self.changelog.heads(start)
1129 1129 # sort the output in rev descending order
1130 1130 heads = [(-self.changelog.rev(h), h) for h in heads]
1131 1131 return [n for (r, n) in util.sort(heads)]
1132 1132
    def branchheads(self, branch=None, start=None):
        """Return the head nodes of the given named branch (default: the
        working directory's branch), optionally restricted to the
        descendants of start.  Returns [] for an unknown branch."""
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1180 1180
1181 1181 def branches(self, nodes):
1182 1182 if not nodes:
1183 1183 nodes = [self.changelog.tip()]
1184 1184 b = []
1185 1185 for n in nodes:
1186 1186 t = n
1187 1187 while 1:
1188 1188 p = self.changelog.parents(n)
1189 1189 if p[1] != nullid or p[0] == nullid:
1190 1190 b.append((t, n, p[0], p[1]))
1191 1191 break
1192 1192 n = p[0]
1193 1193 return b
1194 1194
1195 1195 def between(self, pairs):
1196 1196 r = []
1197 1197
1198 1198 for top, bottom in pairs:
1199 1199 n, l, i = top, [], 0
1200 1200 f = 1
1201 1201
1202 1202 while n != bottom:
1203 1203 p = self.changelog.parents(n)[0]
1204 1204 if i == f:
1205 1205 l.append(n)
1206 1206 f = f * 2
1207 1207 n = p
1208 1208 i += 1
1209 1209
1210 1210 r.append(l)
1211 1211
1212 1212 return r
1213 1213
1214 1214 def findincoming(self, remote, base=None, heads=None, force=False):
1215 1215 """Return list of roots of the subsets of missing nodes from remote
1216 1216
1217 1217 If base dict is specified, assume that these nodes and their parents
1218 1218 exist on the remote side and that no child of a node of base exists
1219 1219 in both remote and self.
1220 1220 Furthermore base will be updated to include the nodes that exists
1221 1221 in self and remote but no children exists in self and remote.
1222 1222 If a list of heads is specified, return only nodes which are heads
1223 1223 or ancestors of these heads.
1224 1224
1225 1225 All the ancestors of base are in self and in remote.
1226 1226 All the descendants of the list returned are missing in self.
1227 1227 (and so we know that the rest of the nodes are missing in remote, see
1228 1228 outgoing)
1229 1229 """
1230 1230 m = self.changelog.nodemap
1231 1231 search = []
1232 1232 fetch = {}
1233 1233 seen = {}
1234 1234 seenbranch = {}
1235 1235 if base == None:
1236 1236 base = {}
1237 1237
1238 1238 if not heads:
1239 1239 heads = remote.heads()
1240 1240
1241 1241 if self.changelog.tip() == nullid:
1242 1242 base[nullid] = 1
1243 1243 if heads != [nullid]:
1244 1244 return [nullid]
1245 1245 return []
1246 1246
1247 1247 # assume we're closer to the tip than the root
1248 1248 # and start by examining the heads
1249 1249 self.ui.status(_("searching for changes\n"))
1250 1250
1251 1251 unknown = []
1252 1252 for h in heads:
1253 1253 if h not in m:
1254 1254 unknown.append(h)
1255 1255 else:
1256 1256 base[h] = 1
1257 1257
1258 1258 if not unknown:
1259 1259 return []
1260 1260
1261 1261 req = dict.fromkeys(unknown)
1262 1262 reqcnt = 0
1263 1263
1264 1264 # search through remote branches
1265 1265 # a 'branch' here is a linear segment of history, with four parts:
1266 1266 # head, root, first parent, second parent
1267 1267 # (a branch always has two parents (or none) by definition)
1268 1268 unknown = remote.branches(unknown)
1269 1269 while unknown:
1270 1270 r = []
1271 1271 while unknown:
1272 1272 n = unknown.pop(0)
1273 1273 if n[0] in seen:
1274 1274 continue
1275 1275
1276 1276 self.ui.debug(_("examining %s:%s\n")
1277 1277 % (short(n[0]), short(n[1])))
1278 1278 if n[0] == nullid: # found the end of the branch
1279 1279 pass
1280 1280 elif n in seenbranch:
1281 1281 self.ui.debug(_("branch already found\n"))
1282 1282 continue
1283 1283 elif n[1] and n[1] in m: # do we know the base?
1284 1284 self.ui.debug(_("found incomplete branch %s:%s\n")
1285 1285 % (short(n[0]), short(n[1])))
1286 1286 search.append(n) # schedule branch range for scanning
1287 1287 seenbranch[n] = 1
1288 1288 else:
1289 1289 if n[1] not in seen and n[1] not in fetch:
1290 1290 if n[2] in m and n[3] in m:
1291 1291 self.ui.debug(_("found new changeset %s\n") %
1292 1292 short(n[1]))
1293 1293 fetch[n[1]] = 1 # earliest unknown
1294 1294 for p in n[2:4]:
1295 1295 if p in m:
1296 1296 base[p] = 1 # latest known
1297 1297
1298 1298 for p in n[2:4]:
1299 1299 if p not in req and p not in m:
1300 1300 r.append(p)
1301 1301 req[p] = 1
1302 1302 seen[n[0]] = 1
1303 1303
1304 1304 if r:
1305 1305 reqcnt += 1
1306 1306 self.ui.debug(_("request %d: %s\n") %
1307 1307 (reqcnt, " ".join(map(short, r))))
1308 1308 for p in xrange(0, len(r), 10):
1309 1309 for b in remote.branches(r[p:p+10]):
1310 1310 self.ui.debug(_("received %s:%s\n") %
1311 1311 (short(b[0]), short(b[1])))
1312 1312 unknown.append(b)
1313 1313
1314 1314 # do binary search on the branches we found
1315 1315 while search:
1316 1316 n = search.pop(0)
1317 1317 reqcnt += 1
1318 1318 l = remote.between([(n[0], n[1])])[0]
1319 1319 l.append(n[1])
1320 1320 p = n[0]
1321 1321 f = 1
1322 1322 for i in l:
1323 1323 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1324 1324 if i in m:
1325 1325 if f <= 2:
1326 1326 self.ui.debug(_("found new branch changeset %s\n") %
1327 1327 short(p))
1328 1328 fetch[p] = 1
1329 1329 base[i] = 1
1330 1330 else:
1331 1331 self.ui.debug(_("narrowed branch search to %s:%s\n")
1332 1332 % (short(p), short(i)))
1333 1333 search.append((p, i))
1334 1334 break
1335 1335 p, f = i, f * 2
1336 1336
1337 1337 # sanity check our fetch list
1338 1338 for f in fetch.keys():
1339 1339 if f in m:
1340 1340 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1341 1341
1342 1342 if base.keys() == [nullid]:
1343 1343 if force:
1344 1344 self.ui.warn(_("warning: repository is unrelated\n"))
1345 1345 else:
1346 1346 raise util.Abort(_("repository is unrelated"))
1347 1347
1348 1348 self.ui.debug(_("found new changesets starting at ") +
1349 1349 " ".join([short(f) for f in fetch]) + "\n")
1350 1350
1351 1351 self.ui.debug(_("%d total queries\n") % reqcnt)
1352 1352
1353 1353 return fetch.keys()
1354 1354
1355 1355 def findoutgoing(self, remote, base=None, heads=None, force=False):
1356 1356 """Return list of nodes that are roots of subsets not in remote
1357 1357
1358 1358 If base dict is specified, assume that these nodes and their parents
1359 1359 exist on the remote side.
1360 1360 If a list of heads is specified, return only nodes which are heads
1361 1361 or ancestors of these heads, and return a second element which
1362 1362 contains all remote heads which get new children.
1363 1363 """
1364 1364 if base == None:
1365 1365 base = {}
1366 1366 self.findincoming(remote, base, heads, force=force)
1367 1367
1368 1368 self.ui.debug(_("common changesets up to ")
1369 1369 + " ".join(map(short, base.keys())) + "\n")
1370 1370
1371 1371 remain = dict.fromkeys(self.changelog.nodemap)
1372 1372
1373 1373 # prune everything remote has from the tree
1374 1374 del remain[nullid]
1375 1375 remove = base.keys()
1376 1376 while remove:
1377 1377 n = remove.pop(0)
1378 1378 if n in remain:
1379 1379 del remain[n]
1380 1380 for p in self.changelog.parents(n):
1381 1381 remove.append(p)
1382 1382
1383 1383 # find every node whose parents have been pruned
1384 1384 subset = []
1385 1385 # find every remote head that will get new children
1386 1386 updated_heads = {}
1387 1387 for n in remain:
1388 1388 p1, p2 = self.changelog.parents(n)
1389 1389 if p1 not in remain and p2 not in remain:
1390 1390 subset.append(n)
1391 1391 if heads:
1392 1392 if p1 in heads:
1393 1393 updated_heads[p1] = True
1394 1394 if p2 in heads:
1395 1395 updated_heads[p2] = True
1396 1396
1397 1397 # this is the set of all roots we have to push
1398 1398 if heads:
1399 1399 return subset, updated_heads.keys()
1400 1400 else:
1401 1401 return subset
1402 1402
1403 1403 def pull(self, remote, heads=None, force=False):
1404 1404 lock = self.lock()
1405 1405 try:
1406 1406 fetch = self.findincoming(remote, heads=heads, force=force)
1407 1407 if fetch == [nullid]:
1408 1408 self.ui.status(_("requesting all changes\n"))
1409 1409
1410 1410 if not fetch:
1411 1411 self.ui.status(_("no changes found\n"))
1412 1412 return 0
1413 1413
1414 1414 if heads is None:
1415 1415 cg = remote.changegroup(fetch, 'pull')
1416 1416 else:
1417 1417 if 'changegroupsubset' not in remote.capabilities:
1418 1418 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1419 1419 cg = remote.changegroupsubset(fetch, heads, 'pull')
1420 1420 return self.addchangegroup(cg, 'pull', remote.url())
1421 1421 finally:
1422 1422 del lock
1423 1423
1424 1424 def push(self, remote, force=False, revs=None):
1425 1425 # there are two ways to push to remote repo:
1426 1426 #
1427 1427 # addchangegroup assumes local user can lock remote
1428 1428 # repo (local filesystem, old ssh servers).
1429 1429 #
1430 1430 # unbundle assumes local user cannot lock remote repo (new ssh
1431 1431 # servers, http servers).
1432 1432
1433 1433 if remote.capable('unbundle'):
1434 1434 return self.push_unbundle(remote, force, revs)
1435 1435 return self.push_addchangegroup(remote, force, revs)
1436 1436
    def prepush(self, remote, force, revs):
        """Compute what to push and sanity-check remote heads.

        Returns (changegroup, remote_heads) on success, or
        (None, 1) when there is nothing to push and
        (None, 0) when the push would create new remote heads and
        force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repository is empty; any push is safe
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head is known locally; it stays a head
                        # only if no outgoing head descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1492 1492
1493 1493 def push_addchangegroup(self, remote, force, revs):
1494 1494 lock = remote.lock()
1495 1495 try:
1496 1496 ret = self.prepush(remote, force, revs)
1497 1497 if ret[0] is not None:
1498 1498 cg, remote_heads = ret
1499 1499 return remote.addchangegroup(cg, 'push', self.url())
1500 1500 return ret[1]
1501 1501 finally:
1502 1502 del lock
1503 1503
1504 1504 def push_unbundle(self, remote, force, revs):
1505 1505 # local repo finds heads on server, finds out what revs it
1506 1506 # must push. once revs transferred, if server finds it has
1507 1507 # different heads (someone else won commit/push race), server
1508 1508 # aborts.
1509 1509
1510 1510 ret = self.prepush(remote, force, revs)
1511 1511 if ret[0] is not None:
1512 1512 cg, remote_heads = ret
1513 1513 if force: remote_heads = ['force']
1514 1514 return remote.unbundle(cg, remote_heads, 'push')
1515 1515 return ret[1]
1516 1516
1517 1517 def changegroupinfo(self, nodes, source):
1518 1518 if self.ui.verbose or source == 'bundle':
1519 1519 self.ui.status(_("%d changesets found\n") % len(nodes))
1520 1520 if self.ui.debugflag:
1521 1521 self.ui.debug(_("List of changesets:\n"))
1522 1522 for node in nodes:
1523 1523 self.ui.debug("%s\n" % hex(node))
1524 1524
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        Returns a util.chunkbuffer wrapping the changegroup chunks.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        # the key 1 means "the manifest", already handled
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1819 1819
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: list of nodes the recipient already has; everything
        reachable above them is bundled.
        source: operation tag passed to the 'preoutgoing'/'outgoing' hooks.
        Returns a util.chunkbuffer wrapping the chunk generator."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # all changelog nodes the recipient is missing
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of revision numbers being sent; used to filter manifest and
        # filelog entries down to the ones linked to outgoing changesets
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            # changelog nodes are their own link nodes
            return x

        def gennodelst(log):
            # yield the nodes of `log` whose linked changeset is outgoing
            for r in log:
                n = log.node(r)
                if log.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # side-effect callback: record every file touched by each
            # changeset as it is streamed out
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a node of `revlog` to the changelog node it is linked to
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog group first ...
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            # ... then the manifest group ...
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # ... then one group per changed file, sorted by name
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # file groups are preceded by the file name
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # empty chunk signals the end of the stream
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1887 1887
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # progress callback for changelog.addgroup; returns the revision
            # number the incoming changeset will be assigned
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            # map a changelog node to its revision number (link revision for
            # manifest/filelog entries)
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last revision before/after the incoming group
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    # empty chunk terminates the list of file groups
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # cor+1 is the first new revision
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # drop the last reference so the transaction destructor runs
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1990 1990
1991 1991
1992 1992 def stream_in(self, remote):
1993 1993 fp = remote.stream_out()
1994 1994 l = fp.readline()
1995 1995 try:
1996 1996 resp = int(l)
1997 1997 except ValueError:
1998 1998 raise util.UnexpectedOutput(
1999 1999 _('Unexpected response from remote server:'), l)
2000 2000 if resp == 1:
2001 2001 raise util.Abort(_('operation forbidden by server'))
2002 2002 elif resp == 2:
2003 2003 raise util.Abort(_('locking the remote repository failed'))
2004 2004 elif resp != 0:
2005 2005 raise util.Abort(_('the server sent an unknown error code'))
2006 2006 self.ui.status(_('streaming all changes\n'))
2007 2007 l = fp.readline()
2008 2008 try:
2009 2009 total_files, total_bytes = map(int, l.split(' ', 1))
2010 2010 except (ValueError, TypeError):
2011 2011 raise util.UnexpectedOutput(
2012 2012 _('Unexpected response from remote server:'), l)
2013 2013 self.ui.status(_('%d files to transfer, %s of data\n') %
2014 2014 (total_files, util.bytecount(total_bytes)))
2015 2015 start = time.time()
2016 2016 for i in xrange(total_files):
2017 2017 # XXX doesn't support '\n' or '\r' in filenames
2018 2018 l = fp.readline()
2019 2019 try:
2020 2020 name, size = l.split('\0', 1)
2021 2021 size = int(size)
2022 2022 except ValueError, TypeError:
2023 2023 raise util.UnexpectedOutput(
2024 2024 _('Unexpected response from remote server:'), l)
2025 2025 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2026 2026 ofp = self.sopener(name, 'w')
2027 2027 for chunk in util.filechunkiter(fp, limit=size):
2028 2028 ofp.write(chunk)
2029 2029 ofp.close()
2030 2030 elapsed = time.time() - start
2031 2031 if elapsed <= 0:
2032 2032 elapsed = 0.001
2033 2033 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2034 2034 (util.bytecount(total_bytes), elapsed,
2035 2035 util.bytecount(total_bytes / elapsed)))
2036 2036 self.invalidate()
2037 2037 return len(self.heads()) + 1
2038 2038
2039 2039 def clone(self, remote, heads=[], stream=False):
2040 2040 '''clone remote repository.
2041 2041
2042 2042 keyword arguments:
2043 2043 heads: list of revs to clone (forces use of pull)
2044 2044 stream: use streaming clone if possible'''
2045 2045
2046 2046 # now, all clients that can request uncompressed clones can
2047 2047 # read repo formats supported by all servers that can serve
2048 2048 # them.
2049 2049
2050 2050 # if revlog format changes, client will have to check version
2051 2051 # and format flags on "stream" capability, and use
2052 2052 # uncompressed only if compatible.
2053 2053
2054 2054 if stream and not heads and remote.capable('stream'):
2055 2055 return self.stream_in(remote)
2056 2056 return self.pull(remote, heads)
2057 2057
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    Only plain tuples are captured in the closure, so the returned
    function holds no reference back into the repository object.
    """
    pending = [tuple(entry) for entry in files]
    def a():
        for source, target in pending:
            util.rename(source, target)
    return a
2065 2065
def instance(ui, path, create):
    # Open (or create) a local repository at `path`, stripping any leading
    # 'file:' scheme first.
    return localrepository(ui, util.drop_scheme('file', path), create)
2068 2068
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,116 +1,116 b''
1 1 #! /usr/bin/env python
2 2
3 3 import sys
4 4 from _lsprof import Profiler, profiler_entry
5 5
6 6 __all__ = ['profile', 'Stats']
7 7
def profile(f, *args, **kwds):
    """Run f(*args, **kwds) under the profiler and return its Stats."""
    prof = Profiler()
    prof.enable(subcalls=True, builtins=True)
    try:
        f(*args, **kwds)
    finally:
        # stop profiling even if the profiled call raised
        prof.disable()
    return Stats(prof.getstats())
17 17
18 18
class Stats(object):
    """Wrapper around the entry list returned by Profiler.getstats()."""

    def __init__(self, data):
        # data: list of _lsprof.profiler_entry records
        self.data = data

    def sort(self, crit="inlinetime"):
        """Sort entries (and their sub-call lists) by *crit*, descending."""
        # only attributes of profiler_entry are valid sort criteria
        if crit not in profiler_entry.__dict__:
            raise ValueError("Can't sort by %s" % crit)
        # descending order: comparator arguments are swapped on purpose
        self.data.sort(lambda b, a: cmp(getattr(a, crit),
                                        getattr(b, crit)))
        for e in self.data:
            if e.calls:
                e.calls.sort(lambda b, a: cmp(getattr(a, crit),
                                              getattr(b, crit)))

    def pprint(self, top=None, file=None, limit=None, climit=None):
        """Print a table of entries.

        top: show only the first `top` entries.
        file: output stream (defaults to sys.stdout).
        limit: stop after this many printed rows (entries + sub-calls).
        climit: stop after this many sub-call rows per entry.
        """
        if file is None:
            file = sys.stdout
        d = self.data
        if top is not None:
            d = d[:top]
        # row / header / sub-call row formats
        cols = "% 12s %12s %11.4f %11.4f %s\n"
        hcols = "% 12s %12s %12s %12s %s\n"
        cols2 = "+%12s %12s %11.4f %11.4f + %s\n"
        file.write(hcols % ("CallCount", "Recursive", "Total(ms)",
                            "Inline(ms)", "module:lineno(function)"))
        count = 0
        for e in d:
            file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
                               e.inlinetime, label(e.code)))
            count += 1
            if limit is not None and count == limit:
                return
            ccount = 0
            if e.calls:
                # sub-calls are prefixed with '+' to mark nesting
                for se in e.calls:
                    file.write(cols % ("+%s" % se.callcount, se.reccallcount,
                                       se.totaltime, se.inlinetime,
                                       "+%s" % label(se.code)))
                    count += 1
                    ccount += 1
                    if limit is not None and count == limit:
                        return
                    if climit is not None and ccount == climit:
                        break

    def freeze(self):
        """Replace all references to code objects with string
        descriptions; this makes it possible to pickle the instance."""

        # this code is probably rather ickier than it needs to be!
        # profiler_entry is a named-tuple-like type, so entries are rebuilt
        # rather than mutated in place
        for i in range(len(self.data)):
            e = self.data[i]
            if not isinstance(e.code, str):
                self.data[i] = type(e)((label(e.code),) + e[1:])
            if e.calls:
                for j in range(len(e.calls)):
                    se = e.calls[j]
                    if not isinstance(se.code, str):
                        e.calls[j] = type(se)((label(se.code),) + se[1:])
82 82
# cache: source file name -> printable module name
_fn2mod = {}

def label(code):
    """Return 'module:lineno(function)' for a code object.

    Strings pass through unchanged (they come from Stats.freeze()).
    Module names are resolved via sys.modules and cached in _fn2mod.
    """
    if isinstance(code, str):
        return code
    try:
        mname = _fn2mod[code.co_filename]
    except KeyError:
        mname = None
        for modname, mod in sys.modules.items():
            if mod is None:
                continue
            if not hasattr(mod, '__file__'):
                continue
            if not isinstance(mod.__file__, str):
                continue
            if mod.__file__.startswith(code.co_filename):
                mname = modname
                break
        if mname is None:
            # no loaded module matches; fall back to the raw file name
            mname = '<%s>' % code.co_filename
        _fn2mod[code.co_filename] = mname

    return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
105 105
106 106
if __name__ == '__main__':
    import os
    # shift argv so the profiled script sees its own arguments
    sys.argv = sys.argv[1:]
    if not sys.argv:
        print >> sys.stderr, "usage: lsprof.py <script> <arguments...>"
        sys.exit(2)
    # let imports relative to the profiled script resolve
    sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
    stats = profile(execfile, sys.argv[0], globals(), locals())
    stats.sort()
    stats.pprint()
@@ -1,138 +1,138 b''
1 1 #!/usr/bin/env python
2 2 #
3 3 # This is the mercurial setup script.
4 4 #
5 5 # 'python setup.py install', or
6 6 # 'python setup.py --help' for more options
7 7
8 8 import sys
9 9 if not hasattr(sys, 'version_info') or sys.version_info < (2, 3, 0, 'final'):
10 raise SystemExit, "Mercurial requires python 2.3 or later."
10 raise SystemExit("Mercurial requires python 2.3 or later.")
11 11
12 12 import os
13 13 import shutil
14 14 import tempfile
15 15 from distutils.core import setup, Extension
16 16 from distutils.command.install_data import install_data
17 17 from distutils.ccompiler import new_compiler
18 18
19 19 import mercurial.version
20 20
# extra keyword arguments forwarded to setup() (e.g. py2exe's 'console')
extra = {}
scripts = ['hg']
# on Windows, also install a .bat wrapper next to the 'hg' script
if os.name == 'nt':
    scripts.append('contrib/win32/hg.bat')
25 25
# simplified version of distutils.ccompiler.CCompiler.has_function
# that actually removes its temporary files.
def has_function(cc, funcname):
    """Return True if compiler `cc` can compile and link a call to `funcname`.

    A minimal C program calling the function is built in a temporary
    directory; compiler diagnostics are hidden by pointing stderr at the
    null device.  The temporary directory is removed in all cases.
    """
    tmpdir = tempfile.mkdtemp(prefix='hg-install-')
    devnull = oldstderr = None
    try:
        try:
            fname = os.path.join(tmpdir, 'funcname.c')
            f = open(fname, 'w')
            f.write('int main(void) {\n')
            f.write('    %s();\n' % funcname)
            f.write('}\n')
            f.close()
            # Redirect stderr to the null device to hide any error messages
            # from the compiler.  os.devnull is portable, unlike the
            # hard-coded '/dev/null' used before.
            devnull = open(os.devnull, 'w')
            oldstderr = os.dup(sys.stderr.fileno())
            os.dup2(devnull.fileno(), sys.stderr.fileno())
            objects = cc.compile([fname])
            cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
        except Exception:
            # Any compile/link failure means the function is unavailable.
            # (A bare 'except:' here would also have swallowed
            # KeyboardInterrupt and SystemExit.)
            return False
        return True
    finally:
        if oldstderr is not None:
            os.dup2(oldstderr, sys.stderr.fileno())
        if devnull is not None:
            devnull.close()
        shutil.rmtree(tmpdir)
57 57
# py2exe needs to be installed to work
try:
    import py2exe

    # Help py2exe to find win32com.shell
    try:
        import modulefinder
        import win32com
        for p in win32com.__path__[1:]: # Take the path to win32comext
            modulefinder.AddPackagePath("win32com", p)
        pn = "win32com.shell"
        __import__(pn)
        m = sys.modules[pn]
        for p in m.__path__[1:]:
            modulefinder.AddPackagePath(pn, p)
    except ImportError:
        # win32com not installed; py2exe can still build without the hint
        pass

    # tell py2exe to build a console executable for 'hg'
    extra['console'] = ['hg']

except ImportError:
    # py2exe not available; skip Windows executable support entirely
    pass
80 80
# specify version string, otherwise 'hg identify' will be used:
version = ''

class install_package_data(install_data):
    # Install data files under the package install directory (install_lib)
    # instead of the default data prefix, so templates end up next to the
    # mercurial modules.
    def finalize_options(self):
        self.set_undefined_options('install',
                                   ('install_lib', 'install_dir'))
        install_data.finalize_options(self)
89 89
# record the version so it doesn't have to be derived at runtime
mercurial.version.remember_version(version)
cmdclass = {'install_data': install_package_data}

# C accelerator modules shipped with mercurial
ext_modules=[
    Extension('mercurial.base85', ['mercurial/base85.c']),
    Extension('mercurial.bdiff', ['mercurial/bdiff.c']),
    Extension('mercurial.diffhelpers', ['mercurial/diffhelpers.c']),
    Extension('mercurial.mpatch', ['mercurial/mpatch.c']),
    Extension('mercurial.parsers', ['mercurial/parsers.c']),
    ]

packages = ['mercurial', 'mercurial.hgweb', 'hgext', 'hgext.convert',
            'hgext.highlight']

try:
    # posix only: build osutil, and the inotify extension where usable
    import posix
    ext_modules.append(Extension('mercurial.osutil', ['mercurial/osutil.c']))

    if sys.platform == 'linux2' and os.uname()[2] > '2.6':
        # The inotify extension is only usable with Linux 2.6 kernels.
        # You also need a reasonably recent C library.
        cc = new_compiler()
        if has_function(cc, 'inotify_add_watch'):
            ext_modules.append(Extension('hgext.inotify.linux._inotify',
                                         ['hgext/inotify/linux/_inotify.c']))
            packages.extend(['hgext.inotify', 'hgext.inotify.linux'])
except ImportError:
    # non-posix platform: skip the posix-only extensions
    pass

setup(name='mercurial',
      version=mercurial.version.get_version(),
      author='Matt Mackall',
      author_email='mpm@selenic.com',
      url='http://selenic.com/mercurial',
      description='Scalable distributed SCM',
      license='GNU GPL',
      scripts=scripts,
      packages=packages,
      ext_modules=ext_modules,
      # install every file under templates/ into the package directory
      data_files=[(os.path.join('mercurial', root),
                   [os.path.join(root, file_) for file_ in files])
                  for root, dirs, files in os.walk('templates')],
      cmdclass=cmdclass,
      options=dict(py2exe=dict(packages=['hgext', 'email']),
                   bdist_mpkg=dict(zipdist=True,
                                   license='COPYING',
                                   readme='contrib/macosx/Readme.html',
                                   welcome='contrib/macosx/Welcome.html')),
      **extra)
General Comments 0
You need to be logged in to leave comments. Login now