Merge with mpm
Brendan Cully
r4537:2f489b00 merge default
@@ -0,0 +1,19 @@
+#!/bin/sh
+# a test for issue586
+
+hg init a
+cd a
+echo a > a
+hg ci -Ama
+
+hg init ../b
+cd ../b
+echo b > b
+hg ci -Amb
+
+hg pull -f ../a
+hg merge
+hg rm -f a
+hg ci -Amc
+
+hg st -A
@@ -0,0 +1,13 @@
+adding a
+adding b
+pulling from ../a
+searching for changes
+warning: repository is unrelated
+adding changesets
+adding manifests
+adding file changes
+added 1 changesets with 1 changes to 1 files (+1 heads)
+(run 'hg heads' to see heads, 'hg merge' to merge)
+1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+(branch merge, don't forget to commit)
+C b
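
The script above is the regression test for issue586: two unrelated repositories are force-pulled together, merged, and file a is then force-removed and committed. The second hunk is the expected output the test harness compares against; its final line, "C b", asserts that b is still tracked and clean afterwards, i.e. the forced remove does not leave a stale dirstate entry. A minimal sketch of the same scenario as a Python harness (assuming only that an hg binary is on PATH; the hg() wrapper and directory layout are illustrative, not part of the test suite):

import os
import subprocess
import tempfile

def hg(*args, **kwargs):
    # Run an hg command and capture its output; force a username so
    # 'hg ci' cannot abort in a clean environment.
    env = dict(os.environ, HGUSER='test')
    return subprocess.check_output(('hg',) + args, env=env, **kwargs).decode()

top = tempfile.mkdtemp()
a = os.path.join(top, 'a')
b = os.path.join(top, 'b')

hg('init', a)
with open(os.path.join(a, 'a'), 'w') as f:
    f.write('a\n')
hg('ci', '-Ama', cwd=a)

hg('init', b)
with open(os.path.join(b, 'b'), 'w') as f:
    f.write('b\n')
hg('ci', '-Amb', cwd=b)

hg('pull', '-f', a, cwd=b)   # -f: the repositories are unrelated
hg('merge', cwd=b)
hg('rm', '-f', 'a', cwd=b)
hg('ci', '-Amc', cwd=b)

# The point of the test: b must still be reported clean ('C') afterwards.
assert hg('st', '-A', cwd=b).strip() == 'C b'
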
@@ -1,243 +1,239 @@
 # hgweb/server.py - The standalone hg web server.
 #
 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
 from mercurial import ui, hg, util, templater
 from hgweb_mod import hgweb
 from hgwebdir_mod import hgwebdir
 from request import wsgiapplication
 from mercurial.i18n import gettext as _

 def _splitURI(uri):
     """ Return path and query split from uri

     Just like CGI environment, the path is unquoted, the query is
     not.
     """
     if '?' in uri:
         path, query = uri.split('?', 1)
     else:
         path, query = uri, ''
     return urllib.unquote(path), query

 class _error_logger(object):
     def __init__(self, handler):
         self.handler = handler
     def flush(self):
         pass
     def write(self, str):
         self.writelines(str.split('\n'))
     def writelines(self, seq):
         for msg in seq:
             self.handler.log_error("HG error: %s", msg)

 class _hgwebhandler(object, BaseHTTPServer.BaseHTTPRequestHandler):
     def __init__(self, *args, **kargs):
         self.protocol_version = 'HTTP/1.1'
         BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)

     def log_error(self, format, *args):
         errorlog = self.server.errorlog
         errorlog.write("%s - - [%s] %s\n" % (self.client_address[0],
                                              self.log_date_time_string(),
                                              format % args))

     def log_message(self, format, *args):
         accesslog = self.server.accesslog
         accesslog.write("%s - - [%s] %s\n" % (self.client_address[0],
                                               self.log_date_time_string(),
                                               format % args))

     def do_POST(self):
         try:
             try:
                 self.do_hgweb()
             except socket.error, inst:
                 if inst[0] != errno.EPIPE:
                     raise
         except StandardError, inst:
             self._start_response("500 Internal Server Error", [])
             self._write("Internal Server Error")
             tb = "".join(traceback.format_exception(*sys.exc_info()))
             self.log_error("Exception happened during processing request '%s':\n%s",
                            self.path, tb)

     def do_GET(self):
         self.do_POST()

     def do_hgweb(self):
         path_info, query = _splitURI(self.path)

         env = {}
         env['GATEWAY_INTERFACE'] = 'CGI/1.1'
         env['REQUEST_METHOD'] = self.command
         env['SERVER_NAME'] = self.server.server_name
         env['SERVER_PORT'] = str(self.server.server_port)
         env['REQUEST_URI'] = self.path
         env['PATH_INFO'] = path_info
         env['REMOTE_HOST'] = self.client_address[0]
         env['REMOTE_ADDR'] = self.client_address[0]
         if query:
             env['QUERY_STRING'] = query

         if self.headers.typeheader is None:
             env['CONTENT_TYPE'] = self.headers.type
         else:
             env['CONTENT_TYPE'] = self.headers.typeheader
         length = self.headers.getheader('content-length')
         if length:
             env['CONTENT_LENGTH'] = length
         for header in [h for h in self.headers.keys() \
                        if h not in ('content-type', 'content-length')]:
             hkey = 'HTTP_' + header.replace('-', '_').upper()
             hval = self.headers.getheader(header)
             hval = hval.replace('\n', '').strip()
             if hval:
                 env[hkey] = hval
         env['SERVER_PROTOCOL'] = self.request_version
         env['wsgi.version'] = (1, 0)
         env['wsgi.url_scheme'] = 'http'
         env['wsgi.input'] = self.rfile
         env['wsgi.errors'] = _error_logger(self)
         env['wsgi.multithread'] = isinstance(self.server,
                                              SocketServer.ThreadingMixIn)
         env['wsgi.multiprocess'] = isinstance(self.server,
                                               SocketServer.ForkingMixIn)
         env['wsgi.run_once'] = 0

         self.close_connection = True
         self.saved_status = None
         self.saved_headers = []
         self.sent_headers = False
         self.length = None
         req = self.server.reqmaker(env, self._start_response)
         for data in req:
             if data:
                 self._write(data)

     def send_headers(self):
         if not self.saved_status:
             raise AssertionError("Sending headers before start_response() called")
         saved_status = self.saved_status.split(None, 1)
         saved_status[0] = int(saved_status[0])
         self.send_response(*saved_status)
         should_close = True
         for h in self.saved_headers:
             self.send_header(*h)
             if h[0].lower() == 'content-length':
                 should_close = False
                 self.length = int(h[1])
         # The value of the Connection header is a list of case-insensitive
         # tokens separated by commas and optional whitespace.
         if 'close' in [token.strip().lower() for token in
                        self.headers.get('connection', '').split(',')]:
             should_close = True
         if should_close:
             self.send_header('Connection', 'close')
         self.close_connection = should_close
         self.end_headers()
         self.sent_headers = True

     def _start_response(self, http_status, headers, exc_info=None):
         code, msg = http_status.split(None, 1)
         code = int(code)
         self.saved_status = http_status
         bad_headers = ('connection', 'transfer-encoding')
         self.saved_headers = [ h for h in headers \
                                if h[0].lower() not in bad_headers ]
         return self._write

     def _write(self, data):
         if not self.saved_status:
             raise AssertionError("data written before start_response() called")
         elif not self.sent_headers:
             self.send_headers()
         if self.length is not None:
             if len(data) > self.length:
                 raise AssertionError("Content-length header sent, but more bytes than specified are being written.")
             self.length = self.length - len(data)
         self.wfile.write(data)
         self.wfile.flush()

 def create_server(ui, repo):
     use_threads = True

     def openlog(opt, default):
         if opt and opt != '-':
             return open(opt, 'w')
         return default

     address = ui.config("web", "address", "")
     port = int(ui.config("web", "port", 8000))
     use_ipv6 = ui.configbool("web", "ipv6")
     webdir_conf = ui.config("web", "webdir_conf")
     accesslog = openlog(ui.config("web", "accesslog", "-"), sys.stdout)
     errorlog = openlog(ui.config("web", "errorlog", "-"), sys.stderr)

     if use_threads:
         try:
             from threading import activeCount
         except ImportError:
             use_threads = False

     if use_threads:
         _mixin = SocketServer.ThreadingMixIn
     else:
         if hasattr(os, "fork"):
             _mixin = SocketServer.ForkingMixIn
         else:
             class _mixin:
                 pass

     class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):

         # SO_REUSEADDR has broken semantics on windows
         if os.name == 'nt':
             allow_reuse_address = 0

         def __init__(self, *args, **kargs):
             BaseHTTPServer.HTTPServer.__init__(self, *args, **kargs)
             self.accesslog = accesslog
             self.errorlog = errorlog
             self.daemon_threads = True
             def make_handler():
                 if webdir_conf:
                     hgwebobj = hgwebdir(webdir_conf, ui)
                 elif repo is not None:
                     hgwebobj = hgweb(hg.repository(repo.ui, repo.root))
                 else:
                     raise hg.RepoError(_("There is no Mercurial repository here"
                                          " (.hg not found)"))
                 return hgwebobj
             self.reqmaker = wsgiapplication(make_handler)

-            addr, port = self.socket.getsockname()[:2]
-            if addr in ('0.0.0.0', '::'):
+            addr = address
+            if addr in ('', '::'):
                 addr = socket.gethostname()
-            else:
-                try:
-                    addr = socket.gethostbyaddr(addr)[0]
-                except socket.error:
-                    pass
+
             self.addr, self.port = addr, port

     class IPv6HTTPServer(MercurialHTTPServer):
         address_family = getattr(socket, 'AF_INET6', None)

         def __init__(self, *args, **kwargs):
             if self.address_family is None:
                 raise hg.RepoError(_('IPv6 not available on this system'))
             super(IPv6HTTPServer, self).__init__(*args, **kwargs)

     try:
         if use_ipv6:
             return IPv6HTTPServer((address, port), _hgwebhandler)
         else:
             return MercurialHTTPServer((address, port), _hgwebhandler)
     except socket.error, inst:
         raise util.Abort(_('cannot start server: %s') % inst.args[1])
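
The only functional change in this file is at the bottom of MercurialHTTPServer.__init__: instead of asking the freshly bound socket for its name and reverse-resolving it through gethostbyaddr(), the server now advertises the configured web.address verbatim, substituting the local hostname only for the wildcard bind addresses ('' for IPv4, '::' for IPv6). A standalone sketch of the new rule (display_address is a hypothetical helper name, not something hgweb defines):

import socket

def display_address(configured):
    # Mirror the new behaviour: trust the configured bind address as
    # given, and fall back to the machine's hostname only when binding
    # to all interfaces.  No getsockname()/reverse-DNS round trip.
    if configured in ('', '::'):
        return socket.gethostname()
    return configured

print(display_address(''))           # e.g. 'myhost' when unbound
print(display_address('127.0.0.1'))  # returned untouched

A side effect is that startup no longer depends on a reverse-DNS lookup of the bound address.
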
@@ -1,1965 +1,1967 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 if not path:
23 if not path:
24 p = os.getcwd()
24 p = os.getcwd()
25 while not os.path.isdir(os.path.join(p, ".hg")):
25 while not os.path.isdir(os.path.join(p, ".hg")):
26 oldp = p
26 oldp = p
27 p = os.path.dirname(p)
27 p = os.path.dirname(p)
28 if p == oldp:
28 if p == oldp:
29 raise repo.RepoError(_("There is no Mercurial repository"
29 raise repo.RepoError(_("There is no Mercurial repository"
30 " here (.hg not found)"))
30 " here (.hg not found)"))
31 path = p
31 path = p
32
32
33 self.root = os.path.realpath(path)
33 self.root = os.path.realpath(path)
34 self.path = os.path.join(self.root, ".hg")
34 self.path = os.path.join(self.root, ".hg")
35 self.origroot = path
35 self.origroot = path
36 self.opener = util.opener(self.path)
36 self.opener = util.opener(self.path)
37 self.wopener = util.opener(self.root)
37 self.wopener = util.opener(self.root)
38
38
39 if not os.path.isdir(self.path):
39 if not os.path.isdir(self.path):
40 if create:
40 if create:
41 if not os.path.exists(path):
41 if not os.path.exists(path):
42 os.mkdir(path)
42 os.mkdir(path)
43 os.mkdir(self.path)
43 os.mkdir(self.path)
44 requirements = ["revlogv1"]
44 requirements = ["revlogv1"]
45 if parentui.configbool('format', 'usestore', True):
45 if parentui.configbool('format', 'usestore', True):
46 os.mkdir(os.path.join(self.path, "store"))
46 os.mkdir(os.path.join(self.path, "store"))
47 requirements.append("store")
47 requirements.append("store")
48 # create an invalid changelog
48 # create an invalid changelog
49 self.opener("00changelog.i", "a").write(
49 self.opener("00changelog.i", "a").write(
50 '\0\0\0\2' # represents revlogv2
50 '\0\0\0\2' # represents revlogv2
51 ' dummy changelog to prevent using the old repo layout'
51 ' dummy changelog to prevent using the old repo layout'
52 )
52 )
53 reqfile = self.opener("requires", "w")
53 reqfile = self.opener("requires", "w")
54 for r in requirements:
54 for r in requirements:
55 reqfile.write("%s\n" % r)
55 reqfile.write("%s\n" % r)
56 reqfile.close()
56 reqfile.close()
57 else:
57 else:
58 raise repo.RepoError(_("repository %s not found") % path)
58 raise repo.RepoError(_("repository %s not found") % path)
59 elif create:
59 elif create:
60 raise repo.RepoError(_("repository %s already exists") % path)
60 raise repo.RepoError(_("repository %s already exists") % path)
61 else:
61 else:
62 # find requirements
62 # find requirements
63 try:
63 try:
64 requirements = self.opener("requires").read().splitlines()
64 requirements = self.opener("requires").read().splitlines()
65 except IOError, inst:
65 except IOError, inst:
66 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
67 raise
67 raise
68 requirements = []
68 requirements = []
69 # check them
69 # check them
70 for r in requirements:
70 for r in requirements:
71 if r not in self.supported:
71 if r not in self.supported:
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
72 raise repo.RepoError(_("requirement '%s' not supported") % r)
73
73
74 # setup store
74 # setup store
75 if "store" in requirements:
75 if "store" in requirements:
76 self.encodefn = util.encodefilename
76 self.encodefn = util.encodefilename
77 self.decodefn = util.decodefilename
77 self.decodefn = util.decodefilename
78 self.spath = os.path.join(self.path, "store")
78 self.spath = os.path.join(self.path, "store")
79 else:
79 else:
80 self.encodefn = lambda x: x
80 self.encodefn = lambda x: x
81 self.decodefn = lambda x: x
81 self.decodefn = lambda x: x
82 self.spath = self.path
82 self.spath = self.path
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
83 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
84
84
85 self.ui = ui.ui(parentui=parentui)
85 self.ui = ui.ui(parentui=parentui)
86 try:
86 try:
87 self.ui.readconfig(self.join("hgrc"), self.root)
87 self.ui.readconfig(self.join("hgrc"), self.root)
88 except IOError:
88 except IOError:
89 pass
89 pass
90
90
91 self.changelog = changelog.changelog(self.sopener)
91 self.changelog = changelog.changelog(self.sopener)
92 self.sopener.defversion = self.changelog.version
92 self.sopener.defversion = self.changelog.version
93 self.manifest = manifest.manifest(self.sopener)
93 self.manifest = manifest.manifest(self.sopener)
94
94
95 fallback = self.ui.config('ui', 'fallbackencoding')
95 fallback = self.ui.config('ui', 'fallbackencoding')
96 if fallback:
96 if fallback:
97 util._fallbackencoding = fallback
97 util._fallbackencoding = fallback
98
98
99 self.tagscache = None
99 self.tagscache = None
100 self.branchcache = None
100 self.branchcache = None
101 self.nodetagscache = None
101 self.nodetagscache = None
102 self.filterpats = {}
102 self.filterpats = {}
103 self.transhandle = None
103 self.transhandle = None
104
104
105 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
105 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
106
106
107 def url(self):
107 def url(self):
108 return 'file:' + self.root
108 return 'file:' + self.root
109
109
110 def hook(self, name, throw=False, **args):
110 def hook(self, name, throw=False, **args):
111 def callhook(hname, funcname):
111 def callhook(hname, funcname):
112 '''call python hook. hook is callable object, looked up as
112 '''call python hook. hook is callable object, looked up as
113 name in python module. if callable returns "true", hook
113 name in python module. if callable returns "true", hook
114 fails, else passes. if hook raises exception, treated as
114 fails, else passes. if hook raises exception, treated as
115 hook failure. exception propagates if throw is "true".
115 hook failure. exception propagates if throw is "true".
116
116
117 reason for "true" meaning "hook failed" is so that
117 reason for "true" meaning "hook failed" is so that
118 unmodified commands (e.g. mercurial.commands.update) can
118 unmodified commands (e.g. mercurial.commands.update) can
119 be run as hooks without wrappers to convert return values.'''
119 be run as hooks without wrappers to convert return values.'''
120
120
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
122 obj = funcname
122 obj = funcname
123 if not callable(obj):
123 if not callable(obj):
124 d = funcname.rfind('.')
124 d = funcname.rfind('.')
125 if d == -1:
125 if d == -1:
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
127 'a module)') % (hname, funcname))
127 'a module)') % (hname, funcname))
128 modname = funcname[:d]
128 modname = funcname[:d]
129 try:
129 try:
130 obj = __import__(modname)
130 obj = __import__(modname)
131 except ImportError:
131 except ImportError:
132 try:
132 try:
133 # extensions are loaded with hgext_ prefix
133 # extensions are loaded with hgext_ prefix
134 obj = __import__("hgext_%s" % modname)
134 obj = __import__("hgext_%s" % modname)
135 except ImportError:
135 except ImportError:
136 raise util.Abort(_('%s hook is invalid '
136 raise util.Abort(_('%s hook is invalid '
137 '(import of "%s" failed)') %
137 '(import of "%s" failed)') %
138 (hname, modname))
138 (hname, modname))
139 try:
139 try:
140 for p in funcname.split('.')[1:]:
140 for p in funcname.split('.')[1:]:
141 obj = getattr(obj, p)
141 obj = getattr(obj, p)
142 except AttributeError, err:
142 except AttributeError, err:
143 raise util.Abort(_('%s hook is invalid '
143 raise util.Abort(_('%s hook is invalid '
144 '("%s" is not defined)') %
144 '("%s" is not defined)') %
145 (hname, funcname))
145 (hname, funcname))
146 if not callable(obj):
146 if not callable(obj):
147 raise util.Abort(_('%s hook is invalid '
147 raise util.Abort(_('%s hook is invalid '
148 '("%s" is not callable)') %
148 '("%s" is not callable)') %
149 (hname, funcname))
149 (hname, funcname))
150 try:
150 try:
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
152 except (KeyboardInterrupt, util.SignalInterrupt):
152 except (KeyboardInterrupt, util.SignalInterrupt):
153 raise
153 raise
154 except Exception, exc:
154 except Exception, exc:
155 if isinstance(exc, util.Abort):
155 if isinstance(exc, util.Abort):
156 self.ui.warn(_('error: %s hook failed: %s\n') %
156 self.ui.warn(_('error: %s hook failed: %s\n') %
157 (hname, exc.args[0]))
157 (hname, exc.args[0]))
158 else:
158 else:
159 self.ui.warn(_('error: %s hook raised an exception: '
159 self.ui.warn(_('error: %s hook raised an exception: '
160 '%s\n') % (hname, exc))
160 '%s\n') % (hname, exc))
161 if throw:
161 if throw:
162 raise
162 raise
163 self.ui.print_exc()
163 self.ui.print_exc()
164 return True
164 return True
165 if r:
165 if r:
166 if throw:
166 if throw:
167 raise util.Abort(_('%s hook failed') % hname)
167 raise util.Abort(_('%s hook failed') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
169 return r
169 return r
170
170
171 def runhook(name, cmd):
171 def runhook(name, cmd):
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
174 r = util.system(cmd, environ=env, cwd=self.root)
174 r = util.system(cmd, environ=env, cwd=self.root)
175 if r:
175 if r:
176 desc, r = util.explain_exit(r)
176 desc, r = util.explain_exit(r)
177 if throw:
177 if throw:
178 raise util.Abort(_('%s hook %s') % (name, desc))
178 raise util.Abort(_('%s hook %s') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
180 return r
180 return r
181
181
182 r = False
182 r = False
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
184 if hname.split(".", 1)[0] == name and cmd]
184 if hname.split(".", 1)[0] == name and cmd]
185 hooks.sort()
185 hooks.sort()
186 for hname, cmd in hooks:
186 for hname, cmd in hooks:
187 if callable(cmd):
187 if callable(cmd):
188 r = callhook(hname, cmd) or r
188 r = callhook(hname, cmd) or r
189 elif cmd.startswith('python:'):
189 elif cmd.startswith('python:'):
190 r = callhook(hname, cmd[7:].strip()) or r
190 r = callhook(hname, cmd[7:].strip()) or r
191 else:
191 else:
192 r = runhook(hname, cmd) or r
192 r = runhook(hname, cmd) or r
193 return r
193 return r
194
194
195 tag_disallowed = ':\r\n'
195 tag_disallowed = ':\r\n'
196
196
197 def _tag(self, name, node, message, local, user, date, parent=None):
197 def _tag(self, name, node, message, local, user, date, parent=None):
198 use_dirstate = parent is None
198 use_dirstate = parent is None
199
199
200 for c in self.tag_disallowed:
200 for c in self.tag_disallowed:
201 if c in name:
201 if c in name:
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
203
203
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
205
205
206 if local:
206 if local:
207 # local tags are stored in the current charset
207 # local tags are stored in the current charset
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
209 self.hook('tag', node=hex(node), tag=name, local=local)
209 self.hook('tag', node=hex(node), tag=name, local=local)
210 return
210 return
211
211
212 # committed tags are stored in UTF-8
212 # committed tags are stored in UTF-8
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
214 if use_dirstate:
214 if use_dirstate:
215 self.wfile('.hgtags', 'ab').write(line)
215 self.wfile('.hgtags', 'ab').write(line)
216 else:
216 else:
217 ntags = self.filectx('.hgtags', parent).data()
217 ntags = self.filectx('.hgtags', parent).data()
218 self.wfile('.hgtags', 'ab').write(ntags + line)
218 self.wfile('.hgtags', 'ab').write(ntags + line)
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
220 self.add(['.hgtags'])
220 self.add(['.hgtags'])
221
221
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
223
223
224 self.hook('tag', node=hex(node), tag=name, local=local)
224 self.hook('tag', node=hex(node), tag=name, local=local)
225
225
226 return tagnode
226 return tagnode
227
227
228 def tag(self, name, node, message, local, user, date):
228 def tag(self, name, node, message, local, user, date):
229 '''tag a revision with a symbolic name.
229 '''tag a revision with a symbolic name.
230
230
231 if local is True, the tag is stored in a per-repository file.
231 if local is True, the tag is stored in a per-repository file.
232 otherwise, it is stored in the .hgtags file, and a new
232 otherwise, it is stored in the .hgtags file, and a new
233 changeset is committed with the change.
233 changeset is committed with the change.
234
234
235 keyword arguments:
235 keyword arguments:
236
236
237 local: whether to store tag in non-version-controlled file
237 local: whether to store tag in non-version-controlled file
238 (default False)
238 (default False)
239
239
240 message: commit message to use if committing
240 message: commit message to use if committing
241
241
242 user: name of user to use if committing
242 user: name of user to use if committing
243
243
244 date: date tuple to use if committing'''
244 date: date tuple to use if committing'''
245
245
246 for x in self.status()[:5]:
246 for x in self.status()[:5]:
247 if '.hgtags' in x:
247 if '.hgtags' in x:
248 raise util.Abort(_('working copy of .hgtags is changed '
248 raise util.Abort(_('working copy of .hgtags is changed '
249 '(please commit .hgtags manually)'))
249 '(please commit .hgtags manually)'))
250
250
251
251
252 self._tag(name, node, message, local, user, date)
252 self._tag(name, node, message, local, user, date)
253
253
254 def tags(self):
254 def tags(self):
255 '''return a mapping of tag to node'''
255 '''return a mapping of tag to node'''
256 if self.tagscache:
256 if self.tagscache:
257 return self.tagscache
257 return self.tagscache
258
258
259 globaltags = {}
259 globaltags = {}
260
260
261 def readtags(lines, fn):
261 def readtags(lines, fn):
262 filetags = {}
262 filetags = {}
263 count = 0
263 count = 0
264
264
265 def warn(msg):
265 def warn(msg):
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
267
267
268 for l in lines:
268 for l in lines:
269 count += 1
269 count += 1
270 if not l:
270 if not l:
271 continue
271 continue
272 s = l.split(" ", 1)
272 s = l.split(" ", 1)
273 if len(s) != 2:
273 if len(s) != 2:
274 warn(_("cannot parse entry"))
274 warn(_("cannot parse entry"))
275 continue
275 continue
276 node, key = s
276 node, key = s
277 key = util.tolocal(key.strip()) # stored in UTF-8
277 key = util.tolocal(key.strip()) # stored in UTF-8
278 try:
278 try:
279 bin_n = bin(node)
279 bin_n = bin(node)
280 except TypeError:
280 except TypeError:
281 warn(_("node '%s' is not well formed") % node)
281 warn(_("node '%s' is not well formed") % node)
282 continue
282 continue
283 if bin_n not in self.changelog.nodemap:
283 if bin_n not in self.changelog.nodemap:
284 warn(_("tag '%s' refers to unknown node") % key)
284 warn(_("tag '%s' refers to unknown node") % key)
285 continue
285 continue
286
286
287 h = []
287 h = []
288 if key in filetags:
288 if key in filetags:
289 n, h = filetags[key]
289 n, h = filetags[key]
290 h.append(n)
290 h.append(n)
291 filetags[key] = (bin_n, h)
291 filetags[key] = (bin_n, h)
292
292
293 for k,nh in filetags.items():
293 for k,nh in filetags.items():
294 if k not in globaltags:
294 if k not in globaltags:
295 globaltags[k] = nh
295 globaltags[k] = nh
296 continue
296 continue
297 # we prefer the global tag if:
297 # we prefer the global tag if:
298 # it supercedes us OR
298 # it supercedes us OR
299 # mutual supercedes and it has a higher rank
299 # mutual supercedes and it has a higher rank
300 # otherwise we win because we're tip-most
300 # otherwise we win because we're tip-most
301 an, ah = nh
301 an, ah = nh
302 bn, bh = globaltags[k]
302 bn, bh = globaltags[k]
303 if bn != an and an in bh and \
303 if bn != an and an in bh and \
304 (bn not in ah or len(bh) > len(ah)):
304 (bn not in ah or len(bh) > len(ah)):
305 an = bn
305 an = bn
306 ah.extend([n for n in bh if n not in ah])
306 ah.extend([n for n in bh if n not in ah])
307 globaltags[k] = an, ah
307 globaltags[k] = an, ah
308
308
309 # read the tags file from each head, ending with the tip
309 # read the tags file from each head, ending with the tip
310 f = None
310 f = None
311 for rev, node, fnode in self._hgtagsnodes():
311 for rev, node, fnode in self._hgtagsnodes():
312 f = (f and f.filectx(fnode) or
312 f = (f and f.filectx(fnode) or
313 self.filectx('.hgtags', fileid=fnode))
313 self.filectx('.hgtags', fileid=fnode))
314 readtags(f.data().splitlines(), f)
314 readtags(f.data().splitlines(), f)
315
315
316 try:
316 try:
317 data = util.fromlocal(self.opener("localtags").read())
317 data = util.fromlocal(self.opener("localtags").read())
318 # localtags are stored in the local character set
318 # localtags are stored in the local character set
319 # while the internal tag table is stored in UTF-8
319 # while the internal tag table is stored in UTF-8
320 readtags(data.splitlines(), "localtags")
320 readtags(data.splitlines(), "localtags")
321 except IOError:
321 except IOError:
322 pass
322 pass
323
323
324 self.tagscache = {}
324 self.tagscache = {}
325 for k,nh in globaltags.items():
325 for k,nh in globaltags.items():
326 n = nh[0]
326 n = nh[0]
327 if n != nullid:
327 if n != nullid:
328 self.tagscache[k] = n
328 self.tagscache[k] = n
329 self.tagscache['tip'] = self.changelog.tip()
329 self.tagscache['tip'] = self.changelog.tip()
330
330
331 return self.tagscache
331 return self.tagscache
332
332
333 def _hgtagsnodes(self):
333 def _hgtagsnodes(self):
334 heads = self.heads()
334 heads = self.heads()
335 heads.reverse()
335 heads.reverse()
336 last = {}
336 last = {}
337 ret = []
337 ret = []
338 for node in heads:
338 for node in heads:
339 c = self.changectx(node)
339 c = self.changectx(node)
340 rev = c.rev()
340 rev = c.rev()
341 try:
341 try:
342 fnode = c.filenode('.hgtags')
342 fnode = c.filenode('.hgtags')
343 except revlog.LookupError:
343 except revlog.LookupError:
344 continue
344 continue
345 ret.append((rev, node, fnode))
345 ret.append((rev, node, fnode))
346 if fnode in last:
346 if fnode in last:
347 ret[last[fnode]] = None
347 ret[last[fnode]] = None
348 last[fnode] = len(ret) - 1
348 last[fnode] = len(ret) - 1
349 return [item for item in ret if item]
349 return [item for item in ret if item]
350
350
351 def tagslist(self):
351 def tagslist(self):
352 '''return a list of tags ordered by revision'''
352 '''return a list of tags ordered by revision'''
353 l = []
353 l = []
354 for t, n in self.tags().items():
354 for t, n in self.tags().items():
355 try:
355 try:
356 r = self.changelog.rev(n)
356 r = self.changelog.rev(n)
357 except:
357 except:
358 r = -2 # sort to the beginning of the list if unknown
358 r = -2 # sort to the beginning of the list if unknown
359 l.append((r, t, n))
359 l.append((r, t, n))
360 l.sort()
360 l.sort()
361 return [(t, n) for r, t, n in l]
361 return [(t, n) for r, t, n in l]
362
362
363 def nodetags(self, node):
363 def nodetags(self, node):
364 '''return the tags associated with a node'''
364 '''return the tags associated with a node'''
365 if not self.nodetagscache:
365 if not self.nodetagscache:
366 self.nodetagscache = {}
366 self.nodetagscache = {}
367 for t, n in self.tags().items():
367 for t, n in self.tags().items():
368 self.nodetagscache.setdefault(n, []).append(t)
368 self.nodetagscache.setdefault(n, []).append(t)
369 return self.nodetagscache.get(node, [])
369 return self.nodetagscache.get(node, [])
370
370
371 def _branchtags(self):
371 def _branchtags(self):
372 partial, last, lrev = self._readbranchcache()
372 partial, last, lrev = self._readbranchcache()
373
373
374 tiprev = self.changelog.count() - 1
374 tiprev = self.changelog.count() - 1
375 if lrev != tiprev:
375 if lrev != tiprev:
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
378
378
379 return partial
379 return partial
380
380
381 def branchtags(self):
381 def branchtags(self):
382 if self.branchcache is not None:
382 if self.branchcache is not None:
383 return self.branchcache
383 return self.branchcache
384
384
385 self.branchcache = {} # avoid recursion in changectx
385 self.branchcache = {} # avoid recursion in changectx
386 partial = self._branchtags()
386 partial = self._branchtags()
387
387
388 # the branch cache is stored on disk as UTF-8, but in the local
388 # the branch cache is stored on disk as UTF-8, but in the local
389 # charset internally
389 # charset internally
390 for k, v in partial.items():
390 for k, v in partial.items():
391 self.branchcache[util.tolocal(k)] = v
391 self.branchcache[util.tolocal(k)] = v
392 return self.branchcache
392 return self.branchcache
393
393
394 def _readbranchcache(self):
394 def _readbranchcache(self):
395 partial = {}
395 partial = {}
396 try:
396 try:
397 f = self.opener("branch.cache")
397 f = self.opener("branch.cache")
398 lines = f.read().split('\n')
398 lines = f.read().split('\n')
399 f.close()
399 f.close()
400 except (IOError, OSError):
400 except (IOError, OSError):
401 return {}, nullid, nullrev
401 return {}, nullid, nullrev
402
402
403 try:
403 try:
404 last, lrev = lines.pop(0).split(" ", 1)
404 last, lrev = lines.pop(0).split(" ", 1)
405 last, lrev = bin(last), int(lrev)
405 last, lrev = bin(last), int(lrev)
406 if not (lrev < self.changelog.count() and
406 if not (lrev < self.changelog.count() and
407 self.changelog.node(lrev) == last): # sanity check
407 self.changelog.node(lrev) == last): # sanity check
408 # invalidate the cache
408 # invalidate the cache
409 raise ValueError('Invalid branch cache: unknown tip')
409 raise ValueError('Invalid branch cache: unknown tip')
410 for l in lines:
410 for l in lines:
411 if not l: continue
411 if not l: continue
412 node, label = l.split(" ", 1)
412 node, label = l.split(" ", 1)
413 partial[label.strip()] = bin(node)
413 partial[label.strip()] = bin(node)
414 except (KeyboardInterrupt, util.SignalInterrupt):
414 except (KeyboardInterrupt, util.SignalInterrupt):
415 raise
415 raise
416 except Exception, inst:
416 except Exception, inst:
417 if self.ui.debugflag:
417 if self.ui.debugflag:
418 self.ui.warn(str(inst), '\n')
418 self.ui.warn(str(inst), '\n')
419 partial, last, lrev = {}, nullid, nullrev
419 partial, last, lrev = {}, nullid, nullrev
420 return partial, last, lrev
420 return partial, last, lrev
421
421
422 def _writebranchcache(self, branches, tip, tiprev):
422 def _writebranchcache(self, branches, tip, tiprev):
423 try:
423 try:
424 f = self.opener("branch.cache", "w", atomictemp=True)
424 f = self.opener("branch.cache", "w", atomictemp=True)
425 f.write("%s %s\n" % (hex(tip), tiprev))
425 f.write("%s %s\n" % (hex(tip), tiprev))
426 for label, node in branches.iteritems():
426 for label, node in branches.iteritems():
427 f.write("%s %s\n" % (hex(node), label))
427 f.write("%s %s\n" % (hex(node), label))
428 f.rename()
428 f.rename()
429 except (IOError, OSError):
429 except (IOError, OSError):
430 pass
430 pass
431
431
432 def _updatebranchcache(self, partial, start, end):
432 def _updatebranchcache(self, partial, start, end):
433 for r in xrange(start, end):
433 for r in xrange(start, end):
434 c = self.changectx(r)
434 c = self.changectx(r)
435 b = c.branch()
435 b = c.branch()
436 partial[b] = c.node()
436 partial[b] = c.node()
437
437
438 def lookup(self, key):
438 def lookup(self, key):
439 if key == '.':
439 if key == '.':
440 key, second = self.dirstate.parents()
440 key, second = self.dirstate.parents()
441 if key == nullid:
441 if key == nullid:
442 raise repo.RepoError(_("no revision checked out"))
442 raise repo.RepoError(_("no revision checked out"))
443 if second != nullid:
443 if second != nullid:
444 self.ui.warn(_("warning: working directory has two parents, "
444 self.ui.warn(_("warning: working directory has two parents, "
445 "tag '.' uses the first\n"))
445 "tag '.' uses the first\n"))
446 elif key == 'null':
446 elif key == 'null':
447 return nullid
447 return nullid
448 n = self.changelog._match(key)
448 n = self.changelog._match(key)
449 if n:
449 if n:
450 return n
450 return n
451 if key in self.tags():
451 if key in self.tags():
452 return self.tags()[key]
452 return self.tags()[key]
453 if key in self.branchtags():
453 if key in self.branchtags():
454 return self.branchtags()[key]
454 return self.branchtags()[key]
455 n = self.changelog._partialmatch(key)
455 n = self.changelog._partialmatch(key)
456 if n:
456 if n:
457 return n
457 return n
458 raise repo.RepoError(_("unknown revision '%s'") % key)
458 raise repo.RepoError(_("unknown revision '%s'") % key)
459
459
460 def dev(self):
460 def dev(self):
461 return os.lstat(self.path).st_dev
461 return os.lstat(self.path).st_dev
462
462
463 def local(self):
463 def local(self):
464 return True
464 return True
465
465
466 def join(self, f):
466 def join(self, f):
467 return os.path.join(self.path, f)
467 return os.path.join(self.path, f)
468
468
469 def sjoin(self, f):
469 def sjoin(self, f):
470 f = self.encodefn(f)
470 f = self.encodefn(f)
471 return os.path.join(self.spath, f)
471 return os.path.join(self.spath, f)
472
472
473 def wjoin(self, f):
473 def wjoin(self, f):
474 return os.path.join(self.root, f)
474 return os.path.join(self.root, f)
475
475
476 def file(self, f):
476 def file(self, f):
477 if f[0] == '/':
477 if f[0] == '/':
478 f = f[1:]
478 f = f[1:]
479 return filelog.filelog(self.sopener, f)
479 return filelog.filelog(self.sopener, f)
480
480
481 def changectx(self, changeid=None):
481 def changectx(self, changeid=None):
482 return context.changectx(self, changeid)
482 return context.changectx(self, changeid)
483
483
484 def workingctx(self):
484 def workingctx(self):
485 return context.workingctx(self)
485 return context.workingctx(self)
486
486
487 def parents(self, changeid=None):
487 def parents(self, changeid=None):
488 '''
488 '''
489 get list of changectxs for parents of changeid or working directory
489 get list of changectxs for parents of changeid or working directory
490 '''
490 '''
491 if changeid is None:
491 if changeid is None:
492 pl = self.dirstate.parents()
492 pl = self.dirstate.parents()
493 else:
493 else:
494 n = self.changelog.lookup(changeid)
494 n = self.changelog.lookup(changeid)
495 pl = self.changelog.parents(n)
495 pl = self.changelog.parents(n)
496 if pl[1] == nullid:
496 if pl[1] == nullid:
497 return [self.changectx(pl[0])]
497 return [self.changectx(pl[0])]
498 return [self.changectx(pl[0]), self.changectx(pl[1])]
498 return [self.changectx(pl[0]), self.changectx(pl[1])]
499
499
500 def filectx(self, path, changeid=None, fileid=None):
500 def filectx(self, path, changeid=None, fileid=None):
501 """changeid can be a changeset revision, node, or tag.
501 """changeid can be a changeset revision, node, or tag.
502 fileid can be a file revision or node."""
502 fileid can be a file revision or node."""
503 return context.filectx(self, path, changeid, fileid)
503 return context.filectx(self, path, changeid, fileid)
504
504
505 def getcwd(self):
505 def getcwd(self):
506 return self.dirstate.getcwd()
506 return self.dirstate.getcwd()
507
507
508 def pathto(self, f, cwd=None):
508 def pathto(self, f, cwd=None):
509 return self.dirstate.pathto(f, cwd)
509 return self.dirstate.pathto(f, cwd)
510
510
511 def wfile(self, f, mode='r'):
511 def wfile(self, f, mode='r'):
512 return self.wopener(f, mode)
512 return self.wopener(f, mode)
513
513
514 def _link(self, f):
514 def _link(self, f):
515 return os.path.islink(self.wjoin(f))
515 return os.path.islink(self.wjoin(f))
516
516
517 def _filter(self, filter, filename, data):
517 def _filter(self, filter, filename, data):
518 if filter not in self.filterpats:
518 if filter not in self.filterpats:
519 l = []
519 l = []
520 for pat, cmd in self.ui.configitems(filter):
520 for pat, cmd in self.ui.configitems(filter):
521 mf = util.matcher(self.root, "", [pat], [], [])[1]
521 mf = util.matcher(self.root, "", [pat], [], [])[1]
522 l.append((mf, cmd))
522 l.append((mf, cmd))
523 self.filterpats[filter] = l
523 self.filterpats[filter] = l
524
524
525 for mf, cmd in self.filterpats[filter]:
525 for mf, cmd in self.filterpats[filter]:
526 if mf(filename):
526 if mf(filename):
527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
528 data = util.filter(data, cmd)
528 data = util.filter(data, cmd)
529 break
529 break
530
530
531 return data
531 return data
532
532
533 def wread(self, filename):
533 def wread(self, filename):
534 if self._link(filename):
534 if self._link(filename):
535 data = os.readlink(self.wjoin(filename))
535 data = os.readlink(self.wjoin(filename))
536 else:
536 else:
537 data = self.wopener(filename, 'r').read()
537 data = self.wopener(filename, 'r').read()
538 return self._filter("encode", filename, data)
538 return self._filter("encode", filename, data)
539
539
540 def wwrite(self, filename, data, flags):
540 def wwrite(self, filename, data, flags):
541 data = self._filter("decode", filename, data)
541 data = self._filter("decode", filename, data)
542 if "l" in flags:
542 if "l" in flags:
543 f = self.wjoin(filename)
543 f = self.wjoin(filename)
544 try:
544 try:
545 os.unlink(f)
545 os.unlink(f)
546 except OSError:
546 except OSError:
547 pass
547 pass
548 d = os.path.dirname(f)
548 d = os.path.dirname(f)
549 if not os.path.exists(d):
549 if not os.path.exists(d):
550 os.makedirs(d)
550 os.makedirs(d)
551 os.symlink(data, f)
551 os.symlink(data, f)
552 else:
552 else:
553 try:
553 try:
554 if self._link(filename):
554 if self._link(filename):
555 os.unlink(self.wjoin(filename))
555 os.unlink(self.wjoin(filename))
556 except OSError:
556 except OSError:
557 pass
557 pass
558 self.wopener(filename, 'w').write(data)
558 self.wopener(filename, 'w').write(data)
559 util.set_exec(self.wjoin(filename), "x" in flags)
559 util.set_exec(self.wjoin(filename), "x" in flags)
560
560
561 def wwritedata(self, filename, data):
561 def wwritedata(self, filename, data):
562 return self._filter("decode", filename, data)
562 return self._filter("decode", filename, data)
563
563
564 def transaction(self):
564 def transaction(self):
565 tr = self.transhandle
565 tr = self.transhandle
566 if tr != None and tr.running():
566 if tr != None and tr.running():
567 return tr.nest()
567 return tr.nest()
568
568
569 # save dirstate for rollback
569 # save dirstate for rollback
570 try:
570 try:
571 ds = self.opener("dirstate").read()
571 ds = self.opener("dirstate").read()
572 except IOError:
572 except IOError:
573 ds = ""
573 ds = ""
574 self.opener("journal.dirstate", "w").write(ds)
574 self.opener("journal.dirstate", "w").write(ds)
575
575
576 renames = [(self.sjoin("journal"), self.sjoin("undo")),
576 renames = [(self.sjoin("journal"), self.sjoin("undo")),
577 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
577 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
578 tr = transaction.transaction(self.ui.warn, self.sopener,
578 tr = transaction.transaction(self.ui.warn, self.sopener,
579 self.sjoin("journal"),
579 self.sjoin("journal"),
580 aftertrans(renames))
580 aftertrans(renames))
581 self.transhandle = tr
581 self.transhandle = tr
582 return tr
582 return tr
583
583
584 def recover(self):
584 def recover(self):
585 l = self.lock()
585 l = self.lock()
586 if os.path.exists(self.sjoin("journal")):
586 if os.path.exists(self.sjoin("journal")):
587 self.ui.status(_("rolling back interrupted transaction\n"))
587 self.ui.status(_("rolling back interrupted transaction\n"))
588 transaction.rollback(self.sopener, self.sjoin("journal"))
588 transaction.rollback(self.sopener, self.sjoin("journal"))
589 self.reload()
589 self.reload()
590 return True
590 return True
591 else:
591 else:
592 self.ui.warn(_("no interrupted transaction available\n"))
592 self.ui.warn(_("no interrupted transaction available\n"))
593 return False
593 return False
594
594
595 def rollback(self, wlock=None, lock=None):
595 def rollback(self, wlock=None, lock=None):
596 if not wlock:
596 if not wlock:
597 wlock = self.wlock()
597 wlock = self.wlock()
598 if not lock:
598 if not lock:
599 lock = self.lock()
599 lock = self.lock()
600 if os.path.exists(self.sjoin("undo")):
600 if os.path.exists(self.sjoin("undo")):
601 self.ui.status(_("rolling back last transaction\n"))
601 self.ui.status(_("rolling back last transaction\n"))
602 transaction.rollback(self.sopener, self.sjoin("undo"))
602 transaction.rollback(self.sopener, self.sjoin("undo"))
603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 self.reload()
604 self.reload()
605 self.wreload()
605 self.wreload()
606 else:
606 else:
607 self.ui.warn(_("no rollback information available\n"))
607 self.ui.warn(_("no rollback information available\n"))
608
608
609 def wreload(self):
609 def wreload(self):
610 self.dirstate.reload()
610 self.dirstate.reload()
611
611
612 def reload(self):
612 def reload(self):
613 self.changelog.load()
613 self.changelog.load()
614 self.manifest.load()
614 self.manifest.load()
615 self.tagscache = None
615 self.tagscache = None
616 self.nodetagscache = None
616 self.nodetagscache = None
617
617
618 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
618 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
619 desc=None):
619 desc=None):
620 try:
620 try:
621 l = lock.lock(lockname, 0, releasefn, desc=desc)
621 l = lock.lock(lockname, 0, releasefn, desc=desc)
622 except lock.LockHeld, inst:
622 except lock.LockHeld, inst:
623 if not wait:
623 if not wait:
624 raise
624 raise
625 self.ui.warn(_("waiting for lock on %s held by %r\n") %
625 self.ui.warn(_("waiting for lock on %s held by %r\n") %
626 (desc, inst.locker))
626 (desc, inst.locker))
627 # default to 600 seconds timeout
627 # default to 600 seconds timeout
628 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
628 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
629 releasefn, desc=desc)
629 releasefn, desc=desc)
630 if acquirefn:
630 if acquirefn:
631 acquirefn()
631 acquirefn()
632 return l
632 return l
633
633
634 def lock(self, wait=1):
634 def lock(self, wait=1):
635 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
635 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
636 desc=_('repository %s') % self.origroot)
636 desc=_('repository %s') % self.origroot)
637
637
638 def wlock(self, wait=1):
638 def wlock(self, wait=1):
639 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
639 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
640 self.wreload,
640 self.wreload,
641 desc=_('working directory of %s') % self.origroot)
641 desc=_('working directory of %s') % self.origroot)
642
642
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)

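    # rawcommit() commits exactly the files given, against explicit parents,
    # bypassing the usual dirstate bookkeeping; commit() recognizes this
    # case via use_dirstate = (p1 is None).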
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock, extra=extra)

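    # commit() proceeds in stages: collect the files to commit, verify the
    # branch name, take the locks, commit each file revision, write a new
    # manifest, then add the changelog entry and fire the commit hooks.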
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

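        # grab the working-directory lock before the store lock, then open
        # a transaction to group the revlog writes below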
        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                new_exec = is_exec(f)
                new_link = is_link(f)
                if not changed or changed[-1] != f:
                    # mention the file in the changelog if some flag changed,
                    # even if there was no content change.
                    old_exec = m1.execf(f)
                    old_link = m1.linkf(f)
                    if old_exec != new_exec or old_link != new_link:
                        changed.append(f)
                m1.set(f, new_exec, new_link)
            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
            elif f in m2:
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n

    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn

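    # status() returns seven sorted lists: (modified, added, removed,
    # deleted, unknown, ignored, clean); the last two are only populated
    # when list_ignored/list_clean are requested.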
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        mywlock = False

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if not wlock and not mywlock:
                                mywlock = True
                                try:
                                    wlock = self.wlock(wait=0)
                                except lock.LockException:
                                    pass
                            if wlock:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

            if mywlock and wlock:
                wlock.release()
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                                fcmp(fn, getnode))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

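    # the working-copy methods below manipulate dirstate entries, which
    # record one state letter per file: 'n' (normal/tracked), 'a' (added),
    # 'r' (removed), '?' (untracked)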
    def add(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            islink = os.path.islink(p)
            size = os.lstat(p).st_size
            if size > 10000000:
                self.ui.warn(_("%s: files over 10MB may cause memory and"
                               " performance problems\n"
                               "(use 'hg revert %s' to unadd the file)\n")
                             % (f, f))
            if not islink and not os.path.exists(p):
                self.ui.warn(_("%s does not exist!\n") % f)
            elif not islink and not os.path.isfile(p):
                self.ui.warn(_("%s not added: only files and symlinks "
                               "supported currently\n") % f)
            elif self.dirstate.state(f) in 'an':
                self.ui.warn(_("%s already tracked!\n") % f)
            else:
                self.dirstate.update([f], "a")

    def forget(self, list, wlock=None):
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in 'ai':
                self.ui.warn(_("%s not added!\n") % f)
            else:
                self.dirstate.forget([f])

    def remove(self, list, unlink=False, wlock=None):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if unlink and os.path.exists(self.wjoin(f)):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")

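    # undelete() restores files marked removed ('r') by rewriting their
    # contents from the first parent's manifest and marking them normal again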
    def undelete(self, list, wlock=None):
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t, m.flags(f))
                self.dirstate.update([f], "n")

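    # copy() records dest as a copy of source: dest is scheduled for add if
    # untracked, and the dirstate remembers the copy relation so the next
    # commit can store it (see filecommit above)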
    def copy(self, source, dest, wlock=None):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

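    # branches() follows first parents from each node until it hits a merge
    # or a root, returning (segment head, segment root, parent1, parent2) --
    # the linear segments the discovery code below calls 'branches'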
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

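    # between() returns, for each (top, bottom) pair, the nodes on the
    # first-parent chain from top down to bottom, sampled at power-of-two
    # distances (1, 2, 4, ...); findincoming uses these samples to
    # binary-search for the first unknown changeset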
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no child that exists in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

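    # pull() = findincoming() to learn which roots are missing locally, then
    # fetch a changegroup for those roots and apply it with addchangegroup()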
    def pull(self, remote, heads=None, force=False, lock=None):
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

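    # prepush() computes the changegroup to send and refuses to create new
    # remote heads unless forced; it returns (changegroup, remote heads) on
    # success, or (None, 1) when there is nothing to push or the push must
    # be forced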
    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes):
        self.ui.note(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

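    # changegroupsubset() backs partial transfers: pull() with explicit
    # heads relies on the remote's 'changegroupsubset' capability, and
    # prepush() calls it locally when specific revisions are being pushed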
1450 def changegroupsubset(self, bases, heads, source):
1452 def changegroupsubset(self, bases, heads, source):
1451 """This function generates a changegroup consisting of all the nodes
1453 """This function generates a changegroup consisting of all the nodes
1452 that are descendents of any of the bases, and ancestors of any of
1454 that are descendents of any of the bases, and ancestors of any of
1453 the heads.
1455 the heads.
1454
1456
1455 It is fairly complex as determining which filenodes and which
1457 It is fairly complex as determining which filenodes and which
1456 manifest nodes need to be included for the changeset to be complete
1458 manifest nodes need to be included for the changeset to be complete
1457 is non-trivial.
1459 is non-trivial.
1458
1460
1459 Another wrinkle is doing the reverse, figuring out which changeset in
1461 Another wrinkle is doing the reverse, figuring out which changeset in
1460 the changegroup a particular filenode or manifestnode belongs to."""
1462 the changegroup a particular filenode or manifestnode belongs to."""
1461
1463
1462 self.hook('preoutgoing', throw=True, source=source)
1464 self.hook('preoutgoing', throw=True, source=source)
1463
1465
1464 # Set up some initial variables
1466 # Set up some initial variables
1465 # Make it easy to refer to self.changelog
1467 # Make it easy to refer to self.changelog
1466 cl = self.changelog
1468 cl = self.changelog
1467 # msng is short for missing - compute the list of changesets in this
1469 # msng is short for missing - compute the list of changesets in this
1468 # changegroup.
1470 # changegroup.
1469 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1471 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1470 self.changegroupinfo(msng_cl_lst)
1472 self.changegroupinfo(msng_cl_lst)
1471 # Some bases may turn out to be superfluous, and some heads may be
1473 # Some bases may turn out to be superfluous, and some heads may be
1472 # too. nodesbetween will return the minimal set of bases and heads
1474 # too. nodesbetween will return the minimal set of bases and heads
1473 # necessary to re-create the changegroup.
1475 # necessary to re-create the changegroup.
1474
1476
1475 # Known heads are the list of heads that it is assumed the recipient
1477 # Known heads are the list of heads that it is assumed the recipient
1476 # of this changegroup will know about.
1478 # of this changegroup will know about.
1477 knownheads = {}
1479 knownheads = {}
1478 # We assume that all parents of bases are known heads.
1480 # We assume that all parents of bases are known heads.
1479 for n in bases:
1481 for n in bases:
1480 for p in cl.parents(n):
1482 for p in cl.parents(n):
1481 if p != nullid:
1483 if p != nullid:
1482 knownheads[p] = 1
1484 knownheads[p] = 1
1483 knownheads = knownheads.keys()
1485 knownheads = knownheads.keys()
1484 if knownheads:
1486 if knownheads:
1485 # Now that we know what heads are known, we can compute which
1487 # Now that we know what heads are known, we can compute which
1486 # changesets are known. The recipient must know about all
1488 # changesets are known. The recipient must know about all
1487 # changesets required to reach the known heads from the null
1489 # changesets required to reach the known heads from the null
1488 # changeset.
1490 # changeset.
1489 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1491 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1490 junk = None
1492 junk = None
1491 # Transform the list into an ersatz set.
1493 # Transform the list into an ersatz set.
1492 has_cl_set = dict.fromkeys(has_cl_set)
1494 has_cl_set = dict.fromkeys(has_cl_set)
1493 else:
1495 else:
1494 # If there were no known heads, the recipient cannot be assumed to
1496 # If there were no known heads, the recipient cannot be assumed to
1495 # know about any changesets.
1497 # know about any changesets.
1496 has_cl_set = {}
1498 has_cl_set = {}
1497
1499
1498 # Make it easy to refer to self.manifest
1500 # Make it easy to refer to self.manifest
1499 mnfst = self.manifest
1501 mnfst = self.manifest
1500 # We don't know which manifests are missing yet
1502 # We don't know which manifests are missing yet
1501 msng_mnfst_set = {}
1503 msng_mnfst_set = {}
1502 # Nor do we know which filenodes are missing.
1504 # Nor do we know which filenodes are missing.
1503 msng_filenode_set = {}
1505 msng_filenode_set = {}
1504
1506
1505 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1507 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1506 junk = None
1508 junk = None
1507
1509
1508 # A changeset always belongs to itself, so the changenode lookup
1510 # A changeset always belongs to itself, so the changenode lookup
1509 # function for a changenode is identity.
1511 # function for a changenode is identity.
1510 def identity(x):
1512 def identity(x):
1511 return x
1513 return x
1512
1514
1513 # A function generating function. Sets up an environment for the
1515 # A function generating function. Sets up an environment for the
1514 # inner function.
1516 # inner function.
1515 def cmp_by_rev_func(revlog):
1517 def cmp_by_rev_func(revlog):
1516 # Compare two nodes by their revision number in the environment's
1518 # Compare two nodes by their revision number in the environment's
1517 # revision history. Since the revision number both represents the
1519 # revision history. Since the revision number both represents the
1518 # most efficient order to read the nodes in, and represents a
1520 # most efficient order to read the nodes in, and represents a
1519 # topological sorting of the nodes, this function is often useful.
1521 # topological sorting of the nodes, this function is often useful.
1520 def cmp_by_rev(a, b):
1522 def cmp_by_rev(a, b):
1521 return cmp(revlog.rev(a), revlog.rev(b))
1523 return cmp(revlog.rev(a), revlog.rev(b))
1522 return cmp_by_rev
1524 return cmp_by_rev
1523
1525
1524 # If we determine that a particular file or manifest node must be a
1526 # If we determine that a particular file or manifest node must be a
1525 # node that the recipient of the changegroup will already have, we can
1527 # node that the recipient of the changegroup will already have, we can
1526 # also assume the recipient will have all the parents. This function
1528 # also assume the recipient will have all the parents. This function
1527 # prunes them from the set of missing nodes.
1529 # prunes them from the set of missing nodes.
1528 def prune_parents(revlog, hasset, msngset):
1530 def prune_parents(revlog, hasset, msngset):
1529 haslst = hasset.keys()
1531 haslst = hasset.keys()
1530 haslst.sort(cmp_by_rev_func(revlog))
1532 haslst.sort(cmp_by_rev_func(revlog))
1531 for node in haslst:
1533 for node in haslst:
1532 parentlst = [p for p in revlog.parents(node) if p != nullid]
1534 parentlst = [p for p in revlog.parents(node) if p != nullid]
1533 while parentlst:
1535 while parentlst:
1534 n = parentlst.pop()
1536 n = parentlst.pop()
1535 if n not in hasset:
1537 if n not in hasset:
1536 hasset[n] = 1
1538 hasset[n] = 1
1537 p = [p for p in revlog.parents(n) if p != nullid]
1539 p = [p for p in revlog.parents(n) if p != nullid]
1538 parentlst.extend(p)
1540 parentlst.extend(p)
1539 for n in hasset:
1541 for n in hasset:
1540 msngset.pop(n, None)
1542 msngset.pop(n, None)
1541
1543
1542 # This is a function generating function used to set up an environment
1544 # This is a function generating function used to set up an environment
1543 # for the inner function to execute in.
1545 # for the inner function to execute in.
1544 def manifest_and_file_collector(changedfileset):
1546 def manifest_and_file_collector(changedfileset):
1545 # This is an information gathering function that gathers
1547 # This is an information gathering function that gathers
1546 # information from each changeset node that goes out as part of
1548 # information from each changeset node that goes out as part of
1547 # the changegroup. The information gathered is a list of which
1549 # the changegroup. The information gathered is a list of which
1548 # manifest nodes are potentially required (the recipient may
1550 # manifest nodes are potentially required (the recipient may
1549 # already have them) and total list of all files which were
1551 # already have them) and total list of all files which were
1550 # changed in any changeset in the changegroup.
1552 # changed in any changeset in the changegroup.
1551 #
1553 #
1552 # We also remember the first changenode we saw any manifest
1554 # We also remember the first changenode we saw any manifest
1553 # referenced by so we can later determine which changenode 'owns'
1555 # referenced by so we can later determine which changenode 'owns'
1554 # the manifest.
1556 # the manifest.
1555 def collect_manifests_and_files(clnode):
1557 def collect_manifests_and_files(clnode):
1556 c = cl.read(clnode)
1558 c = cl.read(clnode)
1557 for f in c[3]:
1559 for f in c[3]:
1558 # This is to make sure we only have one instance of each
1560 # This is to make sure we only have one instance of each
1559 # filename string for each filename.
1561 # filename string for each filename.
1560 changedfileset.setdefault(f, f)
1562 changedfileset.setdefault(f, f)
1561 msng_mnfst_set.setdefault(c[0], clnode)
1563 msng_mnfst_set.setdefault(c[0], clnode)
1562 return collect_manifests_and_files
1564 return collect_manifests_and_files
1563
1565
1564 # Figure out which manifest nodes (of the ones we think might be part
1566 # Figure out which manifest nodes (of the ones we think might be part
1565 # of the changegroup) the recipient must know about and remove them
1567 # of the changegroup) the recipient must know about and remove them
1566 # from the changegroup.
1568 # from the changegroup.
1567 def prune_manifests():
1569 def prune_manifests():
1568 has_mnfst_set = {}
1570 has_mnfst_set = {}
1569 for n in msng_mnfst_set:
1571 for n in msng_mnfst_set:
1570 # If a 'missing' manifest thinks it belongs to a changenode
1572 # If a 'missing' manifest thinks it belongs to a changenode
1571 # the recipient is assumed to have, obviously the recipient
1573 # the recipient is assumed to have, obviously the recipient
1572 # must have that manifest.
1574 # must have that manifest.
1573 linknode = cl.node(mnfst.linkrev(n))
1575 linknode = cl.node(mnfst.linkrev(n))
1574 if linknode in has_cl_set:
1576 if linknode in has_cl_set:
1575 has_mnfst_set[n] = 1
1577 has_mnfst_set[n] = 1
1576 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1578 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1577
1579
1578 # Use the information collected in collect_manifests_and_files to say
1580 # Use the information collected in collect_manifests_and_files to say
1579 # which changenode any manifestnode belongs to.
1581 # which changenode any manifestnode belongs to.
1580 def lookup_manifest_link(mnfstnode):
1582 def lookup_manifest_link(mnfstnode):
1581 return msng_mnfst_set[mnfstnode]
1583 return msng_mnfst_set[mnfstnode]
1582
1584
        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

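        # Editorial sketch, not part of the original file: each manifest line
        # the delta parser above sees has the form 'filename\0<40 hex digit
        # filenode>', possibly followed by a flag character, which is why only
        # fnode[:40] is converted; a hypothetical line:
        #
        #   dline = 'foo/bar.txt\0' + 'ab' * 20 + 'x'   # 'x' flag assumed
        #   f, fnode = dline.split('\0')
        #   fnode = bin(fnode[:40])                     # 20-byte binary node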
        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

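        # Editorial sketch, not part of the original file:
        # lookup_filenode_link_func is an ordinary closure factory; each call
        # captures one file's msngset, the same pattern as:
        #
        #   def adder(n):
        #       def add(x):
        #           return x + n        # n is captured per factory call
        #       return add
        #   add2 = adder(2)
        #   assert add2(40) == 42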
        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

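    # Editorial sketch, not part of the original file: util.chunkbuffer wraps
    # the gengroup() generator in a file-like object, so a caller can stream
    # the changegroup with plain read() calls (variable names assumed):
    #
    #   cg = repo.changegroupsubset(bases, heads, 'pull')
    #   while 1:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       outfile.write(chunk)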
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

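    # Editorial sketch, not part of the original file: a caller that wants
    # every changeset descending from a known base might do (names assumed):
    #
    #   base = repo.changelog.node(0)          # the oldest changeset
    #   cg = repo.changegroup([base], 'push')  # file-like changegroup stream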
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        cor = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

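    # Editorial sketch, not part of the original file: decoding the return
    # value documented above (names assumed):
    #
    #   ret = repo.addchangegroup(cg, 'pull', url)
    #   if ret == 0:
    #       pass        # nothing changed, or no source
    #   elif ret > 1:
    #       pass        # ret - 1 new heads were added
    #   elif ret < 0:
    #       pass        # -ret - 1 heads were removed
    #   # ret == 1 means the head count is unchanged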

    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.reload()
        return len(self.heads()) + 1

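    # Editorial sketch, not part of the original file: the parsing above
    # implies the following wire layout from the server --
    #
    #   '0\n'                            # status (1 forbidden, 2 lock failed)
    #   '<total_files> <total_bytes>\n'  # summary line
    #   # then, repeated total_files times:
    #   '<store path>\0<size>\n'         # header, followed by <size> raw bytes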
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

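    # Editorial sketch, not part of the original file: a streaming clone falls
    # back to a regular pull whenever explicit heads are requested or the
    # server lacks the capability (names assumed):
    #
    #   count = repo.clone(remote, stream=True)   # stream if possible
    #   count = repo.clone(remote, heads=[tip])   # heads force a pull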
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

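# Editorial sketch, not part of the original file: aftertrans builds the
# callback a transaction runs after it closes, renaming journal files to
# their undo names; a hypothetical use:
#
#   after = aftertrans([('journal', 'undo')])
#   after()    # performs util.rename('journal', 'undo')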
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True