##// END OF EJS Templates
rename util.set_flags to setflags
Adrian Buehlmann -
r14232:df239966 default
parent child Browse files
Show More
@@ -1,1175 +1,1175 b''
1 # Subversion 1.4/1.5 Python API backend
1 # Subversion 1.4/1.5 Python API backend
2 #
2 #
3 # Copyright(C) 2007 Daniel Holth et al
3 # Copyright(C) 2007 Daniel Holth et al
4
4
5 import os
5 import os
6 import re
6 import re
7 import sys
7 import sys
8 import cPickle as pickle
8 import cPickle as pickle
9 import tempfile
9 import tempfile
10 import urllib
10 import urllib
11 import urllib2
11 import urllib2
12
12
13 from mercurial import strutil, scmutil, util, encoding
13 from mercurial import strutil, scmutil, util, encoding
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15
15
16 # Subversion stuff. Works best with very recent Python SVN bindings
16 # Subversion stuff. Works best with very recent Python SVN bindings
17 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
17 # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
18 # these bindings.
18 # these bindings.
19
19
20 from cStringIO import StringIO
20 from cStringIO import StringIO
21
21
22 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
22 from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
23 from common import commandline, converter_source, converter_sink, mapfile
23 from common import commandline, converter_source, converter_sink, mapfile
24
24
25 try:
25 try:
26 from svn.core import SubversionException, Pool
26 from svn.core import SubversionException, Pool
27 import svn
27 import svn
28 import svn.client
28 import svn.client
29 import svn.core
29 import svn.core
30 import svn.ra
30 import svn.ra
31 import svn.delta
31 import svn.delta
32 import transport
32 import transport
33 import warnings
33 import warnings
34 warnings.filterwarnings('ignore',
34 warnings.filterwarnings('ignore',
35 module='svn.core',
35 module='svn.core',
36 category=DeprecationWarning)
36 category=DeprecationWarning)
37
37
38 except ImportError:
38 except ImportError:
39 svn = None
39 svn = None
40
40
41 class SvnPathNotFound(Exception):
41 class SvnPathNotFound(Exception):
42 pass
42 pass
43
43
def revsplit(rev):
    """Split a revision identifier into its (uuid, path, revnum) parts.

    The identifier looks like 'svn:<uuid></module/path>@<revnum>'; the
    module path is returned with a leading slash, or '' when absent.
    """
    url, revnum = rev.rsplit('@', 1)
    head, slash, tail = url.partition('/')
    mod = slash + tail if slash else ''
    # head still carries the 'svn:' scheme prefix; strip it.
    return head[4:], mod, int(revnum)
52
52
def geturl(path):
    """Return a Subversion URL for path.

    Prefers the URL the svn bindings derive from a working copy; falls
    back to a file:// URL for local directories, or returns path as-is.
    """
    try:
        return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
    except SubversionException:
        pass
    if not os.path.isdir(path):
        return path
    path = os.path.normpath(os.path.abspath(path))
    if os.name == 'nt':
        path = '/' + util.normpath(path)
    # Module URL is later compared with the repository URL returned
    # by svn API, which is UTF-8.
    path = encoding.tolocal(path)
    return 'file://%s' % urllib.quote(path)
67
67
def optrev(number):
    """Build an svn_opt_revision_t pinned to the given revision number."""
    rev = svn.core.svn_opt_revision_t()
    rev.kind = svn.core.svn_opt_revision_number
    rev.value.number = number
    return rev
73
73
class changedpath(object):
    """Detached copy of an svn changed-path log entry.

    Copies the fields of interest off the svn-allocated object so they
    outlive pool cleanup and can be pickled to the parent process.
    """
    def __init__(self, p):
        for attr in ('copyfrom_path', 'copyfrom_rev', 'action'):
            setattr(self, attr, getattr(p, attr))
79
79
def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
                  strict_node_history=False):
    """Stream svn log entries for url to fp and terminate the process.

    Runs in a child process (spawned via debugsvnlog). Each log entry is
    pickled to fp as a (paths, revnum, author, date, message) tuple,
    followed by one final sentinel: None on success, or the svn error
    number if get_log failed.
    """
    # Highest pickle protocol available (cPickle's -1 convention).
    protocol = -1
    def receiver(orig_paths, revnum, author, date, message, pool):
        # Replace svn-owned changedpath objects with plain copies so
        # they survive pool destruction and can be pickled.
        if orig_paths is not None:
            for k, v in orig_paths.iteritems():
                orig_paths[k] = changedpath(v)
        pickle.dump((orig_paths, revnum, author, date, message),
                    fp, protocol)

    try:
        # Use an ra of our own so that our parent can consume
        # our results without confusing the server.
        t = transport.SvnRaTransport(url=url)
        svn.ra.get_log(t.ra, paths, start, end, limit,
                       discover_changed_paths,
                       strict_node_history,
                       receiver)
    except SubversionException, (inst, num):
        # Forward the svn error number; logstream raises on non-None.
        pickle.dump(num, fp, protocol)
    except IOError:
        # Caller may interrupt the iteration
        pickle.dump(None, fp, protocol)
    else:
        pickle.dump(None, fp, protocol)
    fp.close()
    # With large history, cleanup process goes crazy and suddenly
    # consumes *huge* amount of memory. The output file being closed,
    # there is no need for clean termination.
    os._exit(0)
110
110
def debugsvnlog(ui, **opts):
    """Fetch SVN log in a subprocess and channel them back to parent to
    avoid memory collection issues.
    """
    # Pickled data flows over both pipes, so they must be binary-safe.
    for stream in (sys.stdin, sys.stdout):
        util.set_binary(stream)
    get_log_child(sys.stdout, *decodeargs(sys.stdin.read()))
119
119
class logstream(object):
    """Interruptible revision log iterator.

    Reads pickled (paths, revnum, author, date, message) tuples produced
    by get_log_child from a child process pipe. The child terminates the
    stream with None on success, or with an error object on failure.
    """
    def __init__(self, stdout):
        # Child process stdout pipe carrying pickled log entries.
        self._stdout = stdout

    def __iter__(self):
        while True:
            try:
                entry = pickle.load(self._stdout)
            except EOFError:
                raise util.Abort(_('Mercurial failed to run itself, check'
                                   ' hg executable is in PATH'))
            try:
                orig_paths, revnum, author, date, message = entry
            # Narrowed from a bare except: only unpacking failures are
            # expected here, and a bare except would also swallow
            # KeyboardInterrupt and the like.
            except (TypeError, ValueError):
                if entry is None:
                    # Clean end-of-stream sentinel from the child.
                    break
                # Anything else is an error report from the child.
                raise SubversionException("child raised exception", entry)
            yield entry

    def close(self):
        # Idempotent: closing an already-closed stream is a no-op.
        if self._stdout:
            self._stdout.close()
            self._stdout = None
144
144
145
145
# Check to see if the given path is a local Subversion repo. Verify this by
# looking for several svn-specific files and directories in the given
# directory.
def filecheck(ui, path, proto):
    return all(os.path.exists(os.path.join(path, x))
               for x in ('locks', 'hooks', 'format', 'db'))
154
154
# Check to see if a given path is the root of an svn repo over http. We verify
# this by requesting a version-controlled URL we know can't exist and looking
# for the svn-specific "not found" XML.
def httpcheck(ui, path, proto):
    """Probe whether proto://path is an svn repository root over HTTP(S).

    Returns True when the server answers like mod_dav_svn, and also when
    the probe is inconclusive (non-404 HTTP error), erring on the side of
    treating the remote as subversion.
    """
    try:
        opener = urllib2.build_opener()
        rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
        data = rsp.read()
    except urllib2.HTTPError, inst:
        if inst.code != 404:
            # Except for 404 we cannot know for sure this is not an svn repo
            ui.warn(_('svn: cannot probe remote repository, assume it could '
                      'be a subversion repository. Use --source-type if you '
                      'know better.\n'))
            return True
        # The 404 body still carries the server's error XML, inspected below.
        data = inst.fp.read()
    except:
        # Could be urllib2.URLError if the URL is invalid or anything else.
        return False
    return '<m:human-readable errcode="160013">' in data
175
175
# Map URL scheme to the probe used by issvnurl to decide whether a path
# is a Subversion repository reachable through that scheme.
protomap = {'http': httpcheck,
            'https': httpcheck,
            'file': filecheck,
            }
def issvnurl(ui, url):
    """Guess whether url points inside a Subversion repository.

    Walks up the path one component at a time, probing each ancestor
    with the scheme-specific checker from protomap. URLs without a
    scheme are treated as local paths.
    """
    try:
        proto, path = url.split('://', 1)
        if proto == 'file':
            path = urllib.url2pathname(path)
    except ValueError:
        proto, path = 'file', os.path.abspath(url)
    if proto == 'file':
        path = path.replace(os.sep, '/')
    probe = protomap.get(proto, lambda *args: False)
    while '/' in path:
        if probe(ui, path, proto):
            return True
        path = path.rsplit('/', 1)[0]
    return False
196
196
197 # SVN conversion code stolen from bzr-svn and tailor
197 # SVN conversion code stolen from bzr-svn and tailor
198 #
198 #
199 # Subversion looks like a versioned filesystem, branches structures
199 # Subversion looks like a versioned filesystem, branches structures
200 # are defined by conventions and not enforced by the tool. First,
200 # are defined by conventions and not enforced by the tool. First,
201 # we define the potential branches (modules) as "trunk" and "branches"
201 # we define the potential branches (modules) as "trunk" and "branches"
202 # children directories. Revisions are then identified by their
202 # children directories. Revisions are then identified by their
203 # module and revision number (and a repository identifier).
203 # module and revision number (and a repository identifier).
204 #
204 #
205 # The revision graph is really a tree (or a forest). By default, a
205 # The revision graph is really a tree (or a forest). By default, a
206 # revision parent is the previous revision in the same module. If the
206 # revision parent is the previous revision in the same module. If the
207 # module directory is copied/moved from another module then the
207 # module directory is copied/moved from another module then the
208 # revision is the module root and its parent the source revision in
208 # revision is the module root and its parent the source revision in
209 # the parent module. A revision has at most one parent.
209 # the parent module. A revision has at most one parent.
210 #
210 #
211 class svn_source(converter_source):
211 class svn_source(converter_source):
    def __init__(self, ui, url, rev=None):
        """Open url as a Subversion conversion source.

        Validates that url looks like an svn repository, checks the
        python bindings are present and recent enough, connects, and
        resolves the starting head. Raises NoRepo, MissingTool or
        util.Abort on failure.
        """
        super(svn_source, self).__init__(ui, url, rev=rev)

        if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
                (os.path.exists(url) and
                 os.path.exists(os.path.join(url, '.svn'))) or
                issvnurl(ui, url)):
            raise NoRepo(_("%s does not look like a Subversion repository")
                         % url)
        if svn is None:
            raise MissingTool(_('Could not load Subversion python bindings'))

        try:
            version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
            if version < (1, 4):
                raise MissingTool(_('Subversion python bindings %d.%d found, '
                                    '1.4 or later required') % version)
        # Very old bindings lack the SVN_VER_* constants entirely.
        except AttributeError:
            raise MissingTool(_('Subversion python bindings are too old, 1.4 '
                                'or later required'))

        self.lastrevs = {}

        latest = None
        try:
            # Support file://path@rev syntax. Useful e.g. to convert
            # deleted branches.
            at = url.rfind('@')
            if at >= 0:
                latest = int(url[at + 1:])
                url = url[:at]
        except ValueError:
            pass
        self.url = geturl(url)
        self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
        try:
            self.transport = transport.SvnRaTransport(url=self.url)
            self.ra = self.transport.ra
            self.ctx = self.transport.client
            self.baseurl = svn.ra.get_repos_root(self.ra)
            # Module is either empty or a repository path starting with
            # a slash and not ending with a slash.
            self.module = urllib.unquote(self.url[len(self.baseurl):])
            self.prevmodule = None
            self.rootmodule = self.module
            self.commits = {}
            self.paths = {}
            self.uuid = svn.ra.get_uuid(self.ra)
        except SubversionException:
            ui.traceback()
            raise NoRepo(_("%s does not look like a Subversion repository")
                         % self.url)

        # An explicit --rev overrides any @rev suffix parsed above.
        if rev:
            try:
                latest = int(rev)
            except ValueError:
                raise util.Abort(_('svn: revision %s is not an integer') % rev)

        self.trunkname = self.ui.config('convert', 'svn.trunk', 'trunk').strip('/')
        self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
        try:
            self.startrev = int(self.startrev)
            if self.startrev < 0:
                self.startrev = 0
        except ValueError:
            raise util.Abort(_('svn: start revision %s is not an integer')
                             % self.startrev)

        try:
            self.head = self.latest(self.module, latest)
        except SvnPathNotFound:
            self.head = None
        if not self.head:
            raise util.Abort(_('no revision found in module %s')
                             % self.module)
        self.last_changed = self.revnum(self.head)

        self._changescache = None

        # Remember the working copy path when converting from a checkout.
        if os.path.exists(os.path.join(url, '.svn/entries')):
            self.wc = url
        else:
            self.wc = None
        self.convertfp = None
297
297
298 def setrevmap(self, revmap):
298 def setrevmap(self, revmap):
299 lastrevs = {}
299 lastrevs = {}
300 for revid in revmap.iterkeys():
300 for revid in revmap.iterkeys():
301 uuid, module, revnum = revsplit(revid)
301 uuid, module, revnum = revsplit(revid)
302 lastrevnum = lastrevs.setdefault(module, revnum)
302 lastrevnum = lastrevs.setdefault(module, revnum)
303 if revnum > lastrevnum:
303 if revnum > lastrevnum:
304 lastrevs[module] = revnum
304 lastrevs[module] = revnum
305 self.lastrevs = lastrevs
305 self.lastrevs = lastrevs
306
306
307 def exists(self, path, optrev):
307 def exists(self, path, optrev):
308 try:
308 try:
309 svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
309 svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
310 optrev, False, self.ctx)
310 optrev, False, self.ctx)
311 return True
311 return True
312 except SubversionException:
312 except SubversionException:
313 return False
313 return False
314
314
    def getheads(self):
        """Resolve and return the list of head revision identifiers.

        Honors the convert.svn.trunk/tags/branches configuration: when a
        trunk or branches layout is found, heads are taken from there,
        otherwise the project root itself is the single head. Also sets
        self.head, self.heads and self.tags as side effects.
        """

        def isdir(path, revnum):
            # True when path is a directory at revnum.
            kind = self._checkpath(path, revnum)
            return kind == svn.core.svn_node_dir

        def getcfgpath(name, rev):
            # Resolve the configured (or conventional) layout directory
            # named 'trunk'/'tags'/'branches'; None disables it.
            cfgpath = self.ui.config('convert', 'svn.' + name)
            if cfgpath is not None and cfgpath.strip() == '':
                return None
            path = (cfgpath or name).strip('/')
            if not self.exists(path, rev):
                if self.module.endswith(path) and name == 'trunk':
                    # we are converting from inside this directory
                    return None
                if cfgpath:
                    raise util.Abort(_('expected %s to be at %r, but not found')
                                     % (name, path))
                return None
            self.ui.note(_('found %s at %r\n') % (name, path))
            return path

        rev = optrev(self.last_changed)
        oldmodule = ''
        trunk = getcfgpath('trunk', rev)
        self.tags = getcfgpath('tags', rev)
        branches = getcfgpath('branches', rev)

        # If the project has a trunk or branches, we will extract heads
        # from them. We keep the project root otherwise.
        if trunk:
            oldmodule = self.module or ''
            self.module += '/' + trunk
            self.head = self.latest(self.module, self.last_changed)
            if not self.head:
                raise util.Abort(_('no revision found in module %s')
                                 % self.module)

        # First head in the list is the module's head
        self.heads = [self.head]
        if self.tags is not None:
            self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))

        # Check if branches bring a few more heads to the list
        if branches:
            rpath = self.url.strip('/')
            branchnames = svn.client.ls(rpath + '/' + urllib.quote(branches),
                                        rev, False, self.ctx)
            for branch in branchnames.keys():
                module = '%s/%s/%s' % (oldmodule, branches, branch)
                if not isdir(module, self.last_changed):
                    continue
                brevid = self.latest(module, self.last_changed)
                if not brevid:
                    self.ui.note(_('ignoring empty branch %s\n') % branch)
                    continue
                self.ui.note(_('found branch %s at %d\n') %
                             (branch, self.revnum(brevid)))
                self.heads.append(brevid)

        # convert.svn.startrev only makes sense with a single branch.
        if self.startrev and self.heads:
            if len(self.heads) > 1:
                raise util.Abort(_('svn: start revision is not supported '
                                   'with more than one branch'))
            revnum = self.revnum(self.heads[0])
            if revnum < self.startrev:
                raise util.Abort(
                    _('svn: no revision found after start revision %d')
                    % self.startrev)

        return self.heads
386
386
    def getchanges(self, rev):
        """Return (files, copies) for rev.

        files is a sorted list of (path, rev) pairs and copies maps
        destination paths to sources. Consumes self.paths[rev] and may
        serve/populate the one-entry cache used by getchangedfiles.
        Also sets self.removed as a side effect.
        """
        if self._changescache and self._changescache[0] == rev:
            return self._changescache[1]
        self._changescache = None
        (paths, parents) = self.paths[rev]
        if parents:
            files, self.removed, copies = self.expandpaths(rev, paths, parents)
        else:
            # Perform a full checkout on roots
            uuid, module, revnum = revsplit(rev)
            entries = svn.client.ls(self.baseurl + urllib.quote(module),
                                    optrev(revnum), True, self.ctx)
            files = [n for n, e in entries.iteritems()
                     if e.kind == svn.core.svn_node_file]
            copies = {}
            self.removed = set()

        files.sort()
        files = zip(files, [rev] * len(files))

        # caller caches the result, so free it here to release memory
        del self.paths[rev]
        return (files, copies)
410
410
411 def getchangedfiles(self, rev, i):
411 def getchangedfiles(self, rev, i):
412 changes = self.getchanges(rev)
412 changes = self.getchanges(rev)
413 self._changescache = (rev, changes)
413 self._changescache = (rev, changes)
414 return [f[0] for f in changes[0]]
414 return [f[0] for f in changes[0]]
415
415
    def getcommit(self, rev):
        """Return (and consume) the commit object for rev.

        Fetches a batch of revisions into self.commits on a cache miss;
        the returned entry is removed from the cache since the caller
        keeps its own copy.
        """
        if rev not in self.commits:
            uuid, module, revnum = revsplit(rev)
            self.module = module
            self.reparent(module)
            # We assume that:
            # - requests for revisions after "stop" come from the
            # revision graph backward traversal. Cache all of them
            # down to stop, they will be used eventually.
            # - requests for revisions before "stop" come to get
            # isolated branches parents. Just fetch what is needed.
            stop = self.lastrevs.get(module, 0)
            if revnum < stop:
                stop = revnum + 1
            self._fetch_revisions(revnum, stop)
        commit = self.commits[rev]
        # caller caches the result, so free it here to release memory
        del self.commits[rev]
        return commit
435
435
436 def gettags(self):
436 def gettags(self):
437 tags = {}
437 tags = {}
438 if self.tags is None:
438 if self.tags is None:
439 return tags
439 return tags
440
440
441 # svn tags are just a convention, project branches left in a
441 # svn tags are just a convention, project branches left in a
442 # 'tags' directory. There is no other relationship than
442 # 'tags' directory. There is no other relationship than
443 # ancestry, which is expensive to discover and makes them hard
443 # ancestry, which is expensive to discover and makes them hard
444 # to update incrementally. Worse, past revisions may be
444 # to update incrementally. Worse, past revisions may be
445 # referenced by tags far away in the future, requiring a deep
445 # referenced by tags far away in the future, requiring a deep
446 # history traversal on every calculation. Current code
446 # history traversal on every calculation. Current code
447 # performs a single backward traversal, tracking moves within
447 # performs a single backward traversal, tracking moves within
448 # the tags directory (tag renaming) and recording a new tag
448 # the tags directory (tag renaming) and recording a new tag
449 # everytime a project is copied from outside the tags
449 # everytime a project is copied from outside the tags
450 # directory. It also lists deleted tags, this behaviour may
450 # directory. It also lists deleted tags, this behaviour may
451 # change in the future.
451 # change in the future.
452 pendings = []
452 pendings = []
453 tagspath = self.tags
453 tagspath = self.tags
454 start = svn.ra.get_latest_revnum(self.ra)
454 start = svn.ra.get_latest_revnum(self.ra)
455 stream = self._getlog([self.tags], start, self.startrev)
455 stream = self._getlog([self.tags], start, self.startrev)
456 try:
456 try:
457 for entry in stream:
457 for entry in stream:
458 origpaths, revnum, author, date, message = entry
458 origpaths, revnum, author, date, message = entry
459 copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
459 copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
460 in origpaths.iteritems() if e.copyfrom_path]
460 in origpaths.iteritems() if e.copyfrom_path]
461 # Apply moves/copies from more specific to general
461 # Apply moves/copies from more specific to general
462 copies.sort(reverse=True)
462 copies.sort(reverse=True)
463
463
464 srctagspath = tagspath
464 srctagspath = tagspath
465 if copies and copies[-1][2] == tagspath:
465 if copies and copies[-1][2] == tagspath:
466 # Track tags directory moves
466 # Track tags directory moves
467 srctagspath = copies.pop()[0]
467 srctagspath = copies.pop()[0]
468
468
469 for source, sourcerev, dest in copies:
469 for source, sourcerev, dest in copies:
470 if not dest.startswith(tagspath + '/'):
470 if not dest.startswith(tagspath + '/'):
471 continue
471 continue
472 for tag in pendings:
472 for tag in pendings:
473 if tag[0].startswith(dest):
473 if tag[0].startswith(dest):
474 tagpath = source + tag[0][len(dest):]
474 tagpath = source + tag[0][len(dest):]
475 tag[:2] = [tagpath, sourcerev]
475 tag[:2] = [tagpath, sourcerev]
476 break
476 break
477 else:
477 else:
478 pendings.append([source, sourcerev, dest])
478 pendings.append([source, sourcerev, dest])
479
479
480 # Filter out tags with children coming from different
480 # Filter out tags with children coming from different
481 # parts of the repository like:
481 # parts of the repository like:
482 # /tags/tag.1 (from /trunk:10)
482 # /tags/tag.1 (from /trunk:10)
483 # /tags/tag.1/foo (from /branches/foo:12)
483 # /tags/tag.1/foo (from /branches/foo:12)
484 # Here/tags/tag.1 discarded as well as its children.
484 # Here/tags/tag.1 discarded as well as its children.
485 # It happens with tools like cvs2svn. Such tags cannot
485 # It happens with tools like cvs2svn. Such tags cannot
486 # be represented in mercurial.
486 # be represented in mercurial.
487 addeds = dict((p, e.copyfrom_path) for p, e
487 addeds = dict((p, e.copyfrom_path) for p, e
488 in origpaths.iteritems()
488 in origpaths.iteritems()
489 if e.action == 'A' and e.copyfrom_path)
489 if e.action == 'A' and e.copyfrom_path)
490 badroots = set()
490 badroots = set()
491 for destroot in addeds:
491 for destroot in addeds:
492 for source, sourcerev, dest in pendings:
492 for source, sourcerev, dest in pendings:
493 if (not dest.startswith(destroot + '/')
493 if (not dest.startswith(destroot + '/')
494 or source.startswith(addeds[destroot] + '/')):
494 or source.startswith(addeds[destroot] + '/')):
495 continue
495 continue
496 badroots.add(destroot)
496 badroots.add(destroot)
497 break
497 break
498
498
499 for badroot in badroots:
499 for badroot in badroots:
500 pendings = [p for p in pendings if p[2] != badroot
500 pendings = [p for p in pendings if p[2] != badroot
501 and not p[2].startswith(badroot + '/')]
501 and not p[2].startswith(badroot + '/')]
502
502
503 # Tell tag renamings from tag creations
503 # Tell tag renamings from tag creations
504 remainings = []
504 remainings = []
505 for source, sourcerev, dest in pendings:
505 for source, sourcerev, dest in pendings:
506 tagname = dest.split('/')[-1]
506 tagname = dest.split('/')[-1]
507 if source.startswith(srctagspath):
507 if source.startswith(srctagspath):
508 remainings.append([source, sourcerev, tagname])
508 remainings.append([source, sourcerev, tagname])
509 continue
509 continue
510 if tagname in tags:
510 if tagname in tags:
511 # Keep the latest tag value
511 # Keep the latest tag value
512 continue
512 continue
513 # From revision may be fake, get one with changes
513 # From revision may be fake, get one with changes
514 try:
514 try:
515 tagid = self.latest(source, sourcerev)
515 tagid = self.latest(source, sourcerev)
516 if tagid and tagname not in tags:
516 if tagid and tagname not in tags:
517 tags[tagname] = tagid
517 tags[tagname] = tagid
518 except SvnPathNotFound:
518 except SvnPathNotFound:
519 # It happens when we are following directories
519 # It happens when we are following directories
520 # we assumed were copied with their parents
520 # we assumed were copied with their parents
521 # but were really created in the tag
521 # but were really created in the tag
522 # directory.
522 # directory.
523 pass
523 pass
524 pendings = remainings
524 pendings = remainings
525 tagspath = srctagspath
525 tagspath = srctagspath
526 finally:
526 finally:
527 stream.close()
527 stream.close()
528 return tags
528 return tags
529
529
530 def converted(self, rev, destrev):
530 def converted(self, rev, destrev):
531 if not self.wc:
531 if not self.wc:
532 return
532 return
533 if self.convertfp is None:
533 if self.convertfp is None:
534 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
534 self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
535 'a')
535 'a')
536 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
536 self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
537 self.convertfp.flush()
537 self.convertfp.flush()
538
538
539 def revid(self, revnum, module=None):
539 def revid(self, revnum, module=None):
540 return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
540 return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
541
541
542 def revnum(self, rev):
542 def revnum(self, rev):
543 return int(rev.split('@')[-1])
543 return int(rev.split('@')[-1])
544
544
    def latest(self, path, stop=0):
        """Find the latest revid affecting path, up to stop. It may return
        a revision in a different module, since a branch may be moved without
        a change being reported. Return None if computed module does not
        belong to rootmodule subtree.

        Raises SvnPathNotFound when the path cannot be stat'ed at or
        before stop.
        """
        if not path.startswith(self.rootmodule):
            # Requests on foreign branches may be forbidden at server level
            self.ui.debug('ignoring foreign branch %r\n' % path)
            return None

        # stop == 0 means "up to the youngest revision the server knows"
        if not stop:
            stop = svn.ra.get_latest_revnum(self.ra)
        try:
            # stat() from the repository root, restoring the previous
            # parent afterwards
            prevmodule = self.reparent('')
            dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
            self.reparent(prevmodule)
        except SubversionException:
            dirent = None
        if not dirent:
            raise SvnPathNotFound(_('%s not found up to revision %d')
                                  % (path, stop))

        # stat() gives us the previous revision on this line of
        # development, but it might be in *another module*. Fetch the
        # log and detect renames down to the latest revision.
        stream = self._getlog([path], stop, dirent.created_rev)
        try:
            for entry in stream:
                paths, revnum, author, date, message = entry
                # The log is walked from stop down to created_rev; stop
                # once we reach the stat()'ed revision itself.
                if revnum <= dirent.created_rev:
                    break

                for p in paths:
                    if not path.startswith(p) or not paths[p].copyfrom_path:
                        continue
                    # A parent of path was copied: rewrite path to its
                    # pre-copy location and keep following renames.
                    newpath = paths[p].copyfrom_path + path[len(p):]
                    self.ui.debug("branch renamed from %s to %s at %d\n" %
                                  (path, newpath, revnum))
                    path = newpath
                    break
        finally:
            stream.close()

        # Rename-following may have walked us outside the tracked tree.
        if not path.startswith(self.rootmodule):
            self.ui.debug('ignoring foreign branch %r\n' % path)
            return None
        return self.revid(dirent.created_rev, path)
593
593
594 def reparent(self, module):
594 def reparent(self, module):
595 """Reparent the svn transport and return the previous parent."""
595 """Reparent the svn transport and return the previous parent."""
596 if self.prevmodule == module:
596 if self.prevmodule == module:
597 return module
597 return module
598 svnurl = self.baseurl + urllib.quote(module)
598 svnurl = self.baseurl + urllib.quote(module)
599 prevmodule = self.prevmodule
599 prevmodule = self.prevmodule
600 if prevmodule is None:
600 if prevmodule is None:
601 prevmodule = ''
601 prevmodule = ''
602 self.ui.debug("reparent to %s\n" % svnurl)
602 self.ui.debug("reparent to %s\n" % svnurl)
603 svn.ra.reparent(self.ra, svnurl)
603 svn.ra.reparent(self.ra, svnurl)
604 self.prevmodule = module
604 self.prevmodule = module
605 return prevmodule
605 return prevmodule
606
606
    def expandpaths(self, rev, paths, parents):
        """Expand the changed (path, entry) pairs of revision rev into
        concrete file sets.

        Returns a (changed, removed, copies) triple: changed is a list of
        touched files (it also includes the removed ones), removed is a
        set of deleted files, and copies maps destination file paths to
        their copy source. Directory events are expanded to the files
        they contain via _iterfiles().
        """
        changed, removed = set(), set()
        copies = {}

        # Follow the revision into its module, reparenting if the module
        # changed (branch move).
        new_module, revnum = revsplit(rev)[1:]
        if new_module != self.module:
            self.module = new_module
            self.reparent(self.module)

        for i, (path, ent) in enumerate(paths):
            self.ui.progress(_('scanning paths'), i, item=path,
                             total=len(paths))
            entrypath = self.getrelpath(path)

            kind = self._checkpath(entrypath, revnum)
            if kind == svn.core.svn_node_file:
                changed.add(self.recode(entrypath))
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be
                # represented, ignore their origin for now
                pmodule, prevnum = revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
                if not copyfrom_path:
                    continue
                self.ui.debug("copied to %s from %s@%s\n" %
                              (entrypath, copyfrom_path, ent.copyfrom_rev))
                copies[self.recode(entrypath)] = self.recode(copyfrom_path)
            elif kind == 0: # gone, but had better be a deleted *file*
                self.ui.debug("gone from %s\n" % ent.copyfrom_rev)
                # NOTE(review): this branch assumes parents is non-empty
                # (parents[0] below) — verify against callers.
                pmodule, prevnum = revsplit(parents[0])[1:]
                parentpath = pmodule + "/" + entrypath
                fromkind = self._checkpath(entrypath, prevnum, pmodule)

                if fromkind == svn.core.svn_node_file:
                    removed.add(self.recode(entrypath))
                elif fromkind == svn.core.svn_node_dir:
                    # A whole directory was deleted: mark every file it
                    # contained in the parent revision as removed.
                    oroot = parentpath.strip('/')
                    nroot = path.strip('/')
                    children = self._iterfiles(oroot, prevnum)
                    for childpath in children:
                        childpath = childpath.replace(oroot, nroot)
                        childpath = self.getrelpath("/" + childpath, pmodule)
                        if childpath:
                            removed.add(self.recode(childpath))
                else:
                    self.ui.debug('unknown path in revision %d: %s\n' % \
                                  (revnum, path))
            elif kind == svn.core.svn_node_dir:
                if ent.action == 'M':
                    # If the directory just had a prop change,
                    # then we shouldn't need to look for its children.
                    continue
                if ent.action == 'R' and parents:
                    # If a directory is replacing a file, mark the previous
                    # file as deleted
                    pmodule, prevnum = revsplit(parents[0])[1:]
                    pkind = self._checkpath(entrypath, prevnum, pmodule)
                    if pkind == svn.core.svn_node_file:
                        removed.add(self.recode(entrypath))
                    elif pkind == svn.core.svn_node_dir:
                        # We do not know what files were kept or removed,
                        # mark them all as changed.
                        for childpath in self._iterfiles(pmodule, prevnum):
                            childpath = self.getrelpath("/" + childpath)
                            if childpath:
                                changed.add(self.recode(childpath))

                # Every file below the (added/replaced) directory is changed.
                for childpath in self._iterfiles(path, revnum):
                    childpath = self.getrelpath("/" + childpath)
                    if childpath:
                        changed.add(self.recode(childpath))

                # Handle directory copies
                if not ent.copyfrom_path or not parents:
                    continue
                # Copy sources not in parent revisions cannot be
                # represented, ignore their origin for now
                pmodule, prevnum = revsplit(parents[0])[1:]
                if ent.copyfrom_rev < prevnum:
                    continue
                copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
                if not copyfrompath:
                    continue
                self.ui.debug("mark %s came from %s:%d\n"
                              % (path, copyfrompath, ent.copyfrom_rev))
                # Record a per-file copy for every file under the copied
                # directory.
                children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
                for childpath in children:
                    childpath = self.getrelpath("/" + childpath, pmodule)
                    if not childpath:
                        continue
                    copytopath = path + childpath[len(copyfrompath):]
                    copytopath = self.getrelpath(copytopath)
                    copies[self.recode(copytopath)] = self.recode(childpath)

        self.ui.progress(_('scanning paths'), None)
        # changed is a superset of removed.
        changed.update(removed)
        return (list(changed), removed, copies)
707
707
    def _fetch_revisions(self, from_revnum, to_revnum):
        """Walk the svn log of self.module from from_revnum down to
        to_revnum, populating self.commits and self.paths with parsed
        commit objects. Branch copies are followed via latest() to link
        the first revision of a branch to its parent.
        """
        # The log is always walked newest-first.
        if from_revnum < to_revnum:
            from_revnum, to_revnum = to_revnum, from_revnum

        self.child_cset = None

        def parselogentry(orig_paths, revnum, author, date, message):
            """Return the parsed commit object or None, and True if
            the revision is a branch root.
            """
            self.ui.debug("parsing revision %d (%d changes)\n" %
                          (revnum, len(orig_paths)))

            branched = False
            rev = self.revid(revnum)
            # branch log might return entries for a parent we already have

            if rev in self.commits or revnum < to_revnum:
                return None, branched

            parents = []
            # check whether this revision is the start of a branch or part
            # of a branch renaming
            orig_paths = sorted(orig_paths.iteritems())
            root_paths = [(p, e) for p, e in orig_paths
                          if self.module.startswith(p)]
            if root_paths:
                # Use the most specific prefix of self.module that changed.
                path, ent = root_paths[-1]
                if ent.copyfrom_path:
                    branched = True
                    newpath = ent.copyfrom_path + self.module[len(path):]
                    # ent.copyfrom_rev may not be the actual last revision
                    previd = self.latest(newpath, ent.copyfrom_rev)
                    if previd is not None:
                        prevmodule, prevnum = revsplit(previd)[1:]
                        # Ignore parents older than the configured startrev.
                        if prevnum >= self.startrev:
                            parents = [previd]
                            self.ui.note(
                                _('found parent of branch %s at %d: %s\n') %
                                (self.module, prevnum, prevmodule))
                else:
                    self.ui.debug("no copyfrom path, don't know what to do.\n")

            paths = []
            # filter out unrelated paths
            for path, ent in orig_paths:
                if self.getrelpath(path) is None:
                    continue
                paths.append((path, ent))

            # Example SVN datetime. Includes microseconds.
            # ISO-8601 conformant
            # '2007-01-04T17:35:00.902377Z'
            date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])

            log = message and self.recode(message) or ''
            author = author and self.recode(author) or ''
            try:
                # Last path component names the branch; the trunk maps to
                # the default (None) branch.
                branch = self.module.split("/")[-1]
                if branch == self.trunkname:
                    branch = None
            except IndexError:
                branch = None

            cset = commit(author=author,
                          date=util.datestr(date),
                          desc=log,
                          parents=parents,
                          branch=branch,
                          rev=rev)

            self.commits[rev] = cset
            # The parents list is *shared* among self.paths and the
            # commit object. Both will be updated below.
            self.paths[rev] = (paths, cset.parents)
            if self.child_cset and not self.child_cset.parents:
                # Link the previously parsed (younger) changeset to us.
                self.child_cset.parents[:] = [rev]
            self.child_cset = cset
            return cset, branched

        self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
                     (self.module, from_revnum, to_revnum))

        try:
            firstcset = None
            lastonbranch = False
            stream = self._getlog([self.module], from_revnum, to_revnum)
            try:
                for entry in stream:
                    paths, revnum, author, date, message = entry
                    if revnum < self.startrev:
                        lastonbranch = True
                        break
                    if not paths:
                        self.ui.debug('revision %d has no entries\n' % revnum)
                        # If we ever leave the loop on an empty
                        # revision, do not try to get a parent branch
                        lastonbranch = lastonbranch or revnum == 0
                        continue
                    cset, lastonbranch = parselogentry(paths, revnum, author,
                                                       date, message)
                    if cset:
                        firstcset = cset
                    if lastonbranch:
                        break
            finally:
                stream.close()

            if not lastonbranch and firstcset and not firstcset.parents:
                # The first revision of the sequence (the last fetched one)
                # has invalid parents if not a branch root. Find the parent
                # revision now, if any.
                try:
                    firstrevnum = self.revnum(firstcset.rev)
                    if firstrevnum > 1:
                        latest = self.latest(self.module, firstrevnum - 1)
                        if latest:
                            firstcset.parents.append(latest)
                except SvnPathNotFound:
                    pass
        except SubversionException, (inst, num):
            if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
                raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
            raise
832
832
    def getfile(self, file, rev):
        """Return (data, mode) for file at revision rev.

        mode is 'x' for executables (svn:executable property), 'l' for
        symlinks (svn:special property, with the leading "link " marker
        stripped from the data), '' otherwise. Raises IOError when the
        file was removed or does not exist at rev.
        """
        # TODO: ra.get_file transmits the whole file instead of diffs.
        if file in self.removed:
            raise IOError()
        mode = ''
        try:
            new_module, revnum = revsplit(rev)[1:]
            if self.module != new_module:
                self.module = new_module
                self.reparent(self.module)
            io = StringIO()
            info = svn.ra.get_file(self.ra, file, revnum, io)
            data = io.getvalue()
            # ra.get_files() seems to keep a reference on the input buffer
            # preventing collection. Release it explicitely.
            io.close()
            if isinstance(info, list):
                info = info[-1]
            # Derive the mode from the file's svn properties.
            mode = ("svn:executable" in info) and 'x' or ''
            mode = ("svn:special" in info) and 'l' or mode
        except SubversionException, e:
            notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
                        svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
            if e.apr_err in notfound: # File not found
                raise IOError()
            raise
        if mode == 'l':
            # svn stores symlink targets as "link <target>".
            link_prefix = "link "
            if data.startswith(link_prefix):
                data = data[len(link_prefix):]
        return data, mode
864
864
865 def _iterfiles(self, path, revnum):
865 def _iterfiles(self, path, revnum):
866 """Enumerate all files in path at revnum, recursively."""
866 """Enumerate all files in path at revnum, recursively."""
867 path = path.strip('/')
867 path = path.strip('/')
868 pool = Pool()
868 pool = Pool()
869 rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
869 rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
870 entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
870 entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
871 if path:
871 if path:
872 path += '/'
872 path += '/'
873 return ((path + p) for p, e in entries.iteritems()
873 return ((path + p) for p, e in entries.iteritems()
874 if e.kind == svn.core.svn_node_file)
874 if e.kind == svn.core.svn_node_file)
875
875
876 def getrelpath(self, path, module=None):
876 def getrelpath(self, path, module=None):
877 if module is None:
877 if module is None:
878 module = self.module
878 module = self.module
879 # Given the repository url of this wc, say
879 # Given the repository url of this wc, say
880 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
880 # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
881 # extract the "entry" portion (a relative path) from what
881 # extract the "entry" portion (a relative path) from what
882 # svn log --xml says, ie
882 # svn log --xml says, ie
883 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
883 # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
884 # that is to say "tests/PloneTestCase.py"
884 # that is to say "tests/PloneTestCase.py"
885 if path.startswith(module):
885 if path.startswith(module):
886 relative = path.rstrip('/')[len(module):]
886 relative = path.rstrip('/')[len(module):]
887 if relative.startswith('/'):
887 if relative.startswith('/'):
888 return relative[1:]
888 return relative[1:]
889 elif relative == '':
889 elif relative == '':
890 return relative
890 return relative
891
891
892 # The path is outside our tracked tree...
892 # The path is outside our tracked tree...
893 self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
893 self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
894 return None
894 return None
895
895
896 def _checkpath(self, path, revnum, module=None):
896 def _checkpath(self, path, revnum, module=None):
897 if module is not None:
897 if module is not None:
898 prevmodule = self.reparent('')
898 prevmodule = self.reparent('')
899 path = module + '/' + path
899 path = module + '/' + path
900 try:
900 try:
901 # ra.check_path does not like leading slashes very much, it leads
901 # ra.check_path does not like leading slashes very much, it leads
902 # to PROPFIND subversion errors
902 # to PROPFIND subversion errors
903 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
903 return svn.ra.check_path(self.ra, path.strip('/'), revnum)
904 finally:
904 finally:
905 if module is not None:
905 if module is not None:
906 self.reparent(prevmodule)
906 self.reparent(prevmodule)
907
907
908 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
908 def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
909 strict_node_history=False):
909 strict_node_history=False):
910 # Normalize path names, svn >= 1.5 only wants paths relative to
910 # Normalize path names, svn >= 1.5 only wants paths relative to
911 # supplied URL
911 # supplied URL
912 relpaths = []
912 relpaths = []
913 for p in paths:
913 for p in paths:
914 if not p.startswith('/'):
914 if not p.startswith('/'):
915 p = self.module + '/' + p
915 p = self.module + '/' + p
916 relpaths.append(p.strip('/'))
916 relpaths.append(p.strip('/'))
917 args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
917 args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
918 strict_node_history]
918 strict_node_history]
919 arg = encodeargs(args)
919 arg = encodeargs(args)
920 hgexe = util.hgexecutable()
920 hgexe = util.hgexecutable()
921 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
921 cmd = '%s debugsvnlog' % util.shellquote(hgexe)
922 stdin, stdout = util.popen2(util.quotecommand(cmd))
922 stdin, stdout = util.popen2(util.quotecommand(cmd))
923 stdin.write(arg)
923 stdin.write(arg)
924 try:
924 try:
925 stdin.close()
925 stdin.close()
926 except IOError:
926 except IOError:
927 raise util.Abort(_('Mercurial failed to run itself, check'
927 raise util.Abort(_('Mercurial failed to run itself, check'
928 ' hg executable is in PATH'))
928 ' hg executable is in PATH'))
929 return logstream(stdout)
929 return logstream(stdout)
930
930
# Shell source for a Subversion pre-revprop-change hook. It permits only
# the revision-property changes this extension performs (editing svn:log,
# adding hg:convert-branch and hg:convert-rev) and rejects everything
# else. Presumably written into the sink repository during conversion —
# verify against the svn_sink setup code.
pre_revprop_change = '''#!/bin/sh

REPOS="$1"
REV="$2"
USER="$3"
PROPNAME="$4"
ACTION="$5"

if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi

echo "Changing prohibited revision property" >&2
exit 1
'''
946
946
class svn_sink(converter_sink, commandline):
    # Extracts the new revision number from "Committed revision N."
    # messages (presumably 'svn commit' output — verify at call site).
    commit_re = re.compile(r'Committed revision (\d+).', re.M)
    # Extracts the repository UUID from "Repository UUID: ..." output.
    uuid_re = re.compile(r'Repository UUID:\s*(\S+)', re.M)
950
950
951 def prerun(self):
951 def prerun(self):
952 if self.wc:
952 if self.wc:
953 os.chdir(self.wc)
953 os.chdir(self.wc)
954
954
955 def postrun(self):
955 def postrun(self):
956 if self.wc:
956 if self.wc:
957 os.chdir(self.cwd)
957 os.chdir(self.cwd)
958
958
959 def join(self, name):
959 def join(self, name):
960 return os.path.join(self.wc, '.svn', name)
960 return os.path.join(self.wc, '.svn', name)
961
961
962 def revmapfile(self):
962 def revmapfile(self):
963 return self.join('hg-shamap')
963 return self.join('hg-shamap')
964
964
965 def authorfile(self):
965 def authorfile(self):
966 return self.join('hg-authormap')
966 return self.join('hg-authormap')
967
967
968 def __init__(self, ui, path):
968 def __init__(self, ui, path):
969
969
970 converter_sink.__init__(self, ui, path)
970 converter_sink.__init__(self, ui, path)
971 commandline.__init__(self, ui, 'svn')
971 commandline.__init__(self, ui, 'svn')
972 self.delete = []
972 self.delete = []
973 self.setexec = []
973 self.setexec = []
974 self.delexec = []
974 self.delexec = []
975 self.copies = []
975 self.copies = []
976 self.wc = None
976 self.wc = None
977 self.cwd = os.getcwd()
977 self.cwd = os.getcwd()
978
978
979 path = os.path.realpath(path)
979 path = os.path.realpath(path)
980
980
981 created = False
981 created = False
982 if os.path.isfile(os.path.join(path, '.svn', 'entries')):
982 if os.path.isfile(os.path.join(path, '.svn', 'entries')):
983 self.wc = path
983 self.wc = path
984 self.run0('update')
984 self.run0('update')
985 else:
985 else:
986 wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
986 wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
987
987
988 if os.path.isdir(os.path.dirname(path)):
988 if os.path.isdir(os.path.dirname(path)):
989 if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
989 if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
990 ui.status(_('initializing svn repository %r\n') %
990 ui.status(_('initializing svn repository %r\n') %
991 os.path.basename(path))
991 os.path.basename(path))
992 commandline(ui, 'svnadmin').run0('create', path)
992 commandline(ui, 'svnadmin').run0('create', path)
993 created = path
993 created = path
994 path = util.normpath(path)
994 path = util.normpath(path)
995 if not path.startswith('/'):
995 if not path.startswith('/'):
996 path = '/' + path
996 path = '/' + path
997 path = 'file://' + path
997 path = 'file://' + path
998
998
999 ui.status(_('initializing svn working copy %r\n')
999 ui.status(_('initializing svn working copy %r\n')
1000 % os.path.basename(wcpath))
1000 % os.path.basename(wcpath))
1001 self.run0('checkout', path, wcpath)
1001 self.run0('checkout', path, wcpath)
1002
1002
1003 self.wc = wcpath
1003 self.wc = wcpath
1004 self.opener = scmutil.opener(self.wc)
1004 self.opener = scmutil.opener(self.wc)
1005 self.wopener = scmutil.opener(self.wc)
1005 self.wopener = scmutil.opener(self.wc)
1006 self.childmap = mapfile(ui, self.join('hg-childmap'))
1006 self.childmap = mapfile(ui, self.join('hg-childmap'))
1007 self.is_exec = util.checkexec(self.wc) and util.is_exec or None
1007 self.is_exec = util.checkexec(self.wc) and util.is_exec or None
1008
1008
1009 if created:
1009 if created:
1010 hook = os.path.join(created, 'hooks', 'pre-revprop-change')
1010 hook = os.path.join(created, 'hooks', 'pre-revprop-change')
1011 fp = open(hook, 'w')
1011 fp = open(hook, 'w')
1012 fp.write(pre_revprop_change)
1012 fp.write(pre_revprop_change)
1013 fp.close()
1013 fp.close()
1014 util.set_flags(hook, False, True)
1014 util.setflags(hook, False, True)
1015
1015
1016 output = self.run0('info')
1016 output = self.run0('info')
1017 self.uuid = self.uuid_re.search(output).group(1).strip()
1017 self.uuid = self.uuid_re.search(output).group(1).strip()
1018
1018
1019 def wjoin(self, *names):
1019 def wjoin(self, *names):
1020 return os.path.join(self.wc, *names)
1020 return os.path.join(self.wc, *names)
1021
1021
1022 def putfile(self, filename, flags, data):
1022 def putfile(self, filename, flags, data):
1023 if 'l' in flags:
1023 if 'l' in flags:
1024 self.wopener.symlink(data, filename)
1024 self.wopener.symlink(data, filename)
1025 else:
1025 else:
1026 try:
1026 try:
1027 if os.path.islink(self.wjoin(filename)):
1027 if os.path.islink(self.wjoin(filename)):
1028 os.unlink(filename)
1028 os.unlink(filename)
1029 except OSError:
1029 except OSError:
1030 pass
1030 pass
1031 self.wopener.write(filename, data)
1031 self.wopener.write(filename, data)
1032
1032
1033 if self.is_exec:
1033 if self.is_exec:
1034 was_exec = self.is_exec(self.wjoin(filename))
1034 was_exec = self.is_exec(self.wjoin(filename))
1035 else:
1035 else:
1036 # On filesystems not supporting execute-bit, there is no way
1036 # On filesystems not supporting execute-bit, there is no way
1037 # to know if it is set but asking subversion. Setting it
1037 # to know if it is set but asking subversion. Setting it
1038 # systematically is just as expensive and much simpler.
1038 # systematically is just as expensive and much simpler.
1039 was_exec = 'x' not in flags
1039 was_exec = 'x' not in flags
1040
1040
1041 util.set_flags(self.wjoin(filename), False, 'x' in flags)
1041 util.setflags(self.wjoin(filename), False, 'x' in flags)
1042 if was_exec:
1042 if was_exec:
1043 if 'x' not in flags:
1043 if 'x' not in flags:
1044 self.delexec.append(filename)
1044 self.delexec.append(filename)
1045 else:
1045 else:
1046 if 'x' in flags:
1046 if 'x' in flags:
1047 self.setexec.append(filename)
1047 self.setexec.append(filename)
1048
1048
1049 def _copyfile(self, source, dest):
1049 def _copyfile(self, source, dest):
1050 # SVN's copy command pukes if the destination file exists, but
1050 # SVN's copy command pukes if the destination file exists, but
1051 # our copyfile method expects to record a copy that has
1051 # our copyfile method expects to record a copy that has
1052 # already occurred. Cross the semantic gap.
1052 # already occurred. Cross the semantic gap.
1053 wdest = self.wjoin(dest)
1053 wdest = self.wjoin(dest)
1054 exists = os.path.lexists(wdest)
1054 exists = os.path.lexists(wdest)
1055 if exists:
1055 if exists:
1056 fd, tempname = tempfile.mkstemp(
1056 fd, tempname = tempfile.mkstemp(
1057 prefix='hg-copy-', dir=os.path.dirname(wdest))
1057 prefix='hg-copy-', dir=os.path.dirname(wdest))
1058 os.close(fd)
1058 os.close(fd)
1059 os.unlink(tempname)
1059 os.unlink(tempname)
1060 os.rename(wdest, tempname)
1060 os.rename(wdest, tempname)
1061 try:
1061 try:
1062 self.run0('copy', source, dest)
1062 self.run0('copy', source, dest)
1063 finally:
1063 finally:
1064 if exists:
1064 if exists:
1065 try:
1065 try:
1066 os.unlink(wdest)
1066 os.unlink(wdest)
1067 except OSError:
1067 except OSError:
1068 pass
1068 pass
1069 os.rename(tempname, wdest)
1069 os.rename(tempname, wdest)
1070
1070
1071 def dirs_of(self, files):
1071 def dirs_of(self, files):
1072 dirs = set()
1072 dirs = set()
1073 for f in files:
1073 for f in files:
1074 if os.path.isdir(self.wjoin(f)):
1074 if os.path.isdir(self.wjoin(f)):
1075 dirs.add(f)
1075 dirs.add(f)
1076 for i in strutil.rfindall(f, '/'):
1076 for i in strutil.rfindall(f, '/'):
1077 dirs.add(f[:i])
1077 dirs.add(f[:i])
1078 return dirs
1078 return dirs
1079
1079
1080 def add_dirs(self, files):
1080 def add_dirs(self, files):
1081 add_dirs = [d for d in sorted(self.dirs_of(files))
1081 add_dirs = [d for d in sorted(self.dirs_of(files))
1082 if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
1082 if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
1083 if add_dirs:
1083 if add_dirs:
1084 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
1084 self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
1085 return add_dirs
1085 return add_dirs
1086
1086
1087 def add_files(self, files):
1087 def add_files(self, files):
1088 if files:
1088 if files:
1089 self.xargs(files, 'add', quiet=True)
1089 self.xargs(files, 'add', quiet=True)
1090 return files
1090 return files
1091
1091
1092 def tidy_dirs(self, names):
1092 def tidy_dirs(self, names):
1093 deleted = []
1093 deleted = []
1094 for d in sorted(self.dirs_of(names), reverse=True):
1094 for d in sorted(self.dirs_of(names), reverse=True):
1095 wd = self.wjoin(d)
1095 wd = self.wjoin(d)
1096 if os.listdir(wd) == '.svn':
1096 if os.listdir(wd) == '.svn':
1097 self.run0('delete', d)
1097 self.run0('delete', d)
1098 deleted.append(d)
1098 deleted.append(d)
1099 return deleted
1099 return deleted
1100
1100
1101 def addchild(self, parent, child):
1101 def addchild(self, parent, child):
1102 self.childmap[parent] = child
1102 self.childmap[parent] = child
1103
1103
1104 def revid(self, rev):
1104 def revid(self, rev):
1105 return u"svn:%s@%s" % (self.uuid, rev)
1105 return u"svn:%s@%s" % (self.uuid, rev)
1106
1106
1107 def putcommit(self, files, copies, parents, commit, source, revmap):
1107 def putcommit(self, files, copies, parents, commit, source, revmap):
1108 # Apply changes to working copy
1108 # Apply changes to working copy
1109 for f, v in files:
1109 for f, v in files:
1110 try:
1110 try:
1111 data, mode = source.getfile(f, v)
1111 data, mode = source.getfile(f, v)
1112 except IOError:
1112 except IOError:
1113 self.delete.append(f)
1113 self.delete.append(f)
1114 else:
1114 else:
1115 self.putfile(f, mode, data)
1115 self.putfile(f, mode, data)
1116 if f in copies:
1116 if f in copies:
1117 self.copies.append([copies[f], f])
1117 self.copies.append([copies[f], f])
1118 files = [f[0] for f in files]
1118 files = [f[0] for f in files]
1119
1119
1120 for parent in parents:
1120 for parent in parents:
1121 try:
1121 try:
1122 return self.revid(self.childmap[parent])
1122 return self.revid(self.childmap[parent])
1123 except KeyError:
1123 except KeyError:
1124 pass
1124 pass
1125 entries = set(self.delete)
1125 entries = set(self.delete)
1126 files = frozenset(files)
1126 files = frozenset(files)
1127 entries.update(self.add_dirs(files.difference(entries)))
1127 entries.update(self.add_dirs(files.difference(entries)))
1128 if self.copies:
1128 if self.copies:
1129 for s, d in self.copies:
1129 for s, d in self.copies:
1130 self._copyfile(s, d)
1130 self._copyfile(s, d)
1131 self.copies = []
1131 self.copies = []
1132 if self.delete:
1132 if self.delete:
1133 self.xargs(self.delete, 'delete')
1133 self.xargs(self.delete, 'delete')
1134 self.delete = []
1134 self.delete = []
1135 entries.update(self.add_files(files.difference(entries)))
1135 entries.update(self.add_files(files.difference(entries)))
1136 entries.update(self.tidy_dirs(entries))
1136 entries.update(self.tidy_dirs(entries))
1137 if self.delexec:
1137 if self.delexec:
1138 self.xargs(self.delexec, 'propdel', 'svn:executable')
1138 self.xargs(self.delexec, 'propdel', 'svn:executable')
1139 self.delexec = []
1139 self.delexec = []
1140 if self.setexec:
1140 if self.setexec:
1141 self.xargs(self.setexec, 'propset', 'svn:executable', '*')
1141 self.xargs(self.setexec, 'propset', 'svn:executable', '*')
1142 self.setexec = []
1142 self.setexec = []
1143
1143
1144 fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
1144 fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
1145 fp = os.fdopen(fd, 'w')
1145 fp = os.fdopen(fd, 'w')
1146 fp.write(commit.desc)
1146 fp.write(commit.desc)
1147 fp.close()
1147 fp.close()
1148 try:
1148 try:
1149 output = self.run0('commit',
1149 output = self.run0('commit',
1150 username=util.shortuser(commit.author),
1150 username=util.shortuser(commit.author),
1151 file=messagefile,
1151 file=messagefile,
1152 encoding='utf-8')
1152 encoding='utf-8')
1153 try:
1153 try:
1154 rev = self.commit_re.search(output).group(1)
1154 rev = self.commit_re.search(output).group(1)
1155 except AttributeError:
1155 except AttributeError:
1156 if not files:
1156 if not files:
1157 return parents[0]
1157 return parents[0]
1158 self.ui.warn(_('unexpected svn output:\n'))
1158 self.ui.warn(_('unexpected svn output:\n'))
1159 self.ui.warn(output)
1159 self.ui.warn(output)
1160 raise util.Abort(_('unable to cope with svn output'))
1160 raise util.Abort(_('unable to cope with svn output'))
1161 if commit.rev:
1161 if commit.rev:
1162 self.run('propset', 'hg:convert-rev', commit.rev,
1162 self.run('propset', 'hg:convert-rev', commit.rev,
1163 revprop=True, revision=rev)
1163 revprop=True, revision=rev)
1164 if commit.branch and commit.branch != 'default':
1164 if commit.branch and commit.branch != 'default':
1165 self.run('propset', 'hg:convert-branch', commit.branch,
1165 self.run('propset', 'hg:convert-branch', commit.branch,
1166 revprop=True, revision=rev)
1166 revprop=True, revision=rev)
1167 for parent in parents:
1167 for parent in parents:
1168 self.addchild(parent, rev)
1168 self.addchild(parent, rev)
1169 return self.revid(rev)
1169 return self.revid(rev)
1170 finally:
1170 finally:
1171 os.unlink(messagefile)
1171 os.unlink(messagefile)
1172
1172
1173 def puttags(self, tags):
1173 def puttags(self, tags):
1174 self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
1174 self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
1175 return None, None
1175 return None, None
@@ -1,328 +1,328 b''
1 # extdiff.py - external diff program support for mercurial
1 # extdiff.py - external diff program support for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to allow external programs to compare revisions
8 '''command to allow external programs to compare revisions
9
9
10 The extdiff Mercurial extension allows you to use external programs
10 The extdiff Mercurial extension allows you to use external programs
11 to compare revisions, or revision with working directory. The external
11 to compare revisions, or revision with working directory. The external
12 diff programs are called with a configurable set of options and two
12 diff programs are called with a configurable set of options and two
13 non-option arguments: paths to directories containing snapshots of
13 non-option arguments: paths to directories containing snapshots of
14 files to compare.
14 files to compare.
15
15
16 The extdiff extension also allows to configure new diff commands, so
16 The extdiff extension also allows to configure new diff commands, so
17 you do not need to type :hg:`extdiff -p kdiff3` always. ::
17 you do not need to type :hg:`extdiff -p kdiff3` always. ::
18
18
19 [extdiff]
19 [extdiff]
20 # add new command that runs GNU diff(1) in 'context diff' mode
20 # add new command that runs GNU diff(1) in 'context diff' mode
21 cdiff = gdiff -Nprc5
21 cdiff = gdiff -Nprc5
22 ## or the old way:
22 ## or the old way:
23 #cmd.cdiff = gdiff
23 #cmd.cdiff = gdiff
24 #opts.cdiff = -Nprc5
24 #opts.cdiff = -Nprc5
25
25
26 # add new command called vdiff, runs kdiff3
26 # add new command called vdiff, runs kdiff3
27 vdiff = kdiff3
27 vdiff = kdiff3
28
28
29 # add new command called meld, runs meld (no need to name twice)
29 # add new command called meld, runs meld (no need to name twice)
30 meld =
30 meld =
31
31
32 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
32 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
33 # (see http://www.vim.org/scripts/script.php?script_id=102) Non
33 # (see http://www.vim.org/scripts/script.php?script_id=102) Non
34 # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
34 # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
35 # your .vimrc
35 # your .vimrc
36 vimdiff = gvim -f '+next' '+execute "DirDiff" argv(0) argv(1)'
36 vimdiff = gvim -f '+next' '+execute "DirDiff" argv(0) argv(1)'
37
37
38 Tool arguments can include variables that are expanded at runtime::
38 Tool arguments can include variables that are expanded at runtime::
39
39
40 $parent1, $plabel1 - filename, descriptive label of first parent
40 $parent1, $plabel1 - filename, descriptive label of first parent
41 $child, $clabel - filename, descriptive label of child revision
41 $child, $clabel - filename, descriptive label of child revision
42 $parent2, $plabel2 - filename, descriptive label of second parent
42 $parent2, $plabel2 - filename, descriptive label of second parent
43 $root - repository root
43 $root - repository root
44 $parent is an alias for $parent1.
44 $parent is an alias for $parent1.
45
45
46 The extdiff extension will look in your [diff-tools] and [merge-tools]
46 The extdiff extension will look in your [diff-tools] and [merge-tools]
47 sections for diff tool arguments, when none are specified in [extdiff].
47 sections for diff tool arguments, when none are specified in [extdiff].
48
48
49 ::
49 ::
50
50
51 [extdiff]
51 [extdiff]
52 kdiff3 =
52 kdiff3 =
53
53
54 [diff-tools]
54 [diff-tools]
55 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
55 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
56
56
57 You can use -I/-X and list of file or directory names like normal
57 You can use -I/-X and list of file or directory names like normal
58 :hg:`diff` command. The extdiff extension makes snapshots of only
58 :hg:`diff` command. The extdiff extension makes snapshots of only
59 needed files, so running the external diff program will actually be
59 needed files, so running the external diff program will actually be
60 pretty fast (at least faster than having to compare the entire tree).
60 pretty fast (at least faster than having to compare the entire tree).
61 '''
61 '''
62
62
63 from mercurial.i18n import _
63 from mercurial.i18n import _
64 from mercurial.node import short, nullid
64 from mercurial.node import short, nullid
65 from mercurial import cmdutil, scmutil, util, commands, encoding
65 from mercurial import cmdutil, scmutil, util, commands, encoding
66 import os, shlex, shutil, tempfile, re
66 import os, shlex, shutil, tempfile, re
67
67
68 def snapshot(ui, repo, files, node, tmproot):
68 def snapshot(ui, repo, files, node, tmproot):
69 '''snapshot files as of some revision
69 '''snapshot files as of some revision
70 if not using snapshot, -I/-X does not work and recursive diff
70 if not using snapshot, -I/-X does not work and recursive diff
71 in tools like kdiff3 and meld displays too many files.'''
71 in tools like kdiff3 and meld displays too many files.'''
72 dirname = os.path.basename(repo.root)
72 dirname = os.path.basename(repo.root)
73 if dirname == "":
73 if dirname == "":
74 dirname = "root"
74 dirname = "root"
75 if node is not None:
75 if node is not None:
76 dirname = '%s.%s' % (dirname, short(node))
76 dirname = '%s.%s' % (dirname, short(node))
77 base = os.path.join(tmproot, dirname)
77 base = os.path.join(tmproot, dirname)
78 os.mkdir(base)
78 os.mkdir(base)
79 if node is not None:
79 if node is not None:
80 ui.note(_('making snapshot of %d files from rev %s\n') %
80 ui.note(_('making snapshot of %d files from rev %s\n') %
81 (len(files), short(node)))
81 (len(files), short(node)))
82 else:
82 else:
83 ui.note(_('making snapshot of %d files from working directory\n') %
83 ui.note(_('making snapshot of %d files from working directory\n') %
84 (len(files)))
84 (len(files)))
85 wopener = scmutil.opener(base)
85 wopener = scmutil.opener(base)
86 fns_and_mtime = []
86 fns_and_mtime = []
87 ctx = repo[node]
87 ctx = repo[node]
88 for fn in files:
88 for fn in files:
89 wfn = util.pconvert(fn)
89 wfn = util.pconvert(fn)
90 if not wfn in ctx:
90 if not wfn in ctx:
91 # File doesn't exist; could be a bogus modify
91 # File doesn't exist; could be a bogus modify
92 continue
92 continue
93 ui.note(' %s\n' % wfn)
93 ui.note(' %s\n' % wfn)
94 dest = os.path.join(base, wfn)
94 dest = os.path.join(base, wfn)
95 fctx = ctx[wfn]
95 fctx = ctx[wfn]
96 data = repo.wwritedata(wfn, fctx.data())
96 data = repo.wwritedata(wfn, fctx.data())
97 if 'l' in fctx.flags():
97 if 'l' in fctx.flags():
98 wopener.symlink(data, wfn)
98 wopener.symlink(data, wfn)
99 else:
99 else:
100 wopener.write(wfn, data)
100 wopener.write(wfn, data)
101 if 'x' in fctx.flags():
101 if 'x' in fctx.flags():
102 util.set_flags(dest, False, True)
102 util.setflags(dest, False, True)
103 if node is None:
103 if node is None:
104 fns_and_mtime.append((dest, repo.wjoin(fn),
104 fns_and_mtime.append((dest, repo.wjoin(fn),
105 os.lstat(dest).st_mtime))
105 os.lstat(dest).st_mtime))
106 return dirname, fns_and_mtime
106 return dirname, fns_and_mtime
107
107
108 def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
108 def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
109 '''Do the actuall diff:
109 '''Do the actuall diff:
110
110
111 - copy to a temp structure if diffing 2 internal revisions
111 - copy to a temp structure if diffing 2 internal revisions
112 - copy to a temp structure if diffing working revision with
112 - copy to a temp structure if diffing working revision with
113 another one and more than 1 file is changed
113 another one and more than 1 file is changed
114 - just invoke the diff for a single file in the working dir
114 - just invoke the diff for a single file in the working dir
115 '''
115 '''
116
116
117 revs = opts.get('rev')
117 revs = opts.get('rev')
118 change = opts.get('change')
118 change = opts.get('change')
119 args = ' '.join(diffopts)
119 args = ' '.join(diffopts)
120 do3way = '$parent2' in args
120 do3way = '$parent2' in args
121
121
122 if revs and change:
122 if revs and change:
123 msg = _('cannot specify --rev and --change at the same time')
123 msg = _('cannot specify --rev and --change at the same time')
124 raise util.Abort(msg)
124 raise util.Abort(msg)
125 elif change:
125 elif change:
126 node2 = cmdutil.revsingle(repo, change, None).node()
126 node2 = cmdutil.revsingle(repo, change, None).node()
127 node1a, node1b = repo.changelog.parents(node2)
127 node1a, node1b = repo.changelog.parents(node2)
128 else:
128 else:
129 node1a, node2 = cmdutil.revpair(repo, revs)
129 node1a, node2 = cmdutil.revpair(repo, revs)
130 if not revs:
130 if not revs:
131 node1b = repo.dirstate.p2()
131 node1b = repo.dirstate.p2()
132 else:
132 else:
133 node1b = nullid
133 node1b = nullid
134
134
135 # Disable 3-way merge if there is only one parent
135 # Disable 3-way merge if there is only one parent
136 if do3way:
136 if do3way:
137 if node1b == nullid:
137 if node1b == nullid:
138 do3way = False
138 do3way = False
139
139
140 matcher = cmdutil.match(repo, pats, opts)
140 matcher = cmdutil.match(repo, pats, opts)
141 mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
141 mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
142 if do3way:
142 if do3way:
143 mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
143 mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
144 else:
144 else:
145 mod_b, add_b, rem_b = set(), set(), set()
145 mod_b, add_b, rem_b = set(), set(), set()
146 modadd = mod_a | add_a | mod_b | add_b
146 modadd = mod_a | add_a | mod_b | add_b
147 common = modadd | rem_a | rem_b
147 common = modadd | rem_a | rem_b
148 if not common:
148 if not common:
149 return 0
149 return 0
150
150
151 tmproot = tempfile.mkdtemp(prefix='extdiff.')
151 tmproot = tempfile.mkdtemp(prefix='extdiff.')
152 try:
152 try:
153 # Always make a copy of node1a (and node1b, if applicable)
153 # Always make a copy of node1a (and node1b, if applicable)
154 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
154 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
155 dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
155 dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
156 rev1a = '@%d' % repo[node1a].rev()
156 rev1a = '@%d' % repo[node1a].rev()
157 if do3way:
157 if do3way:
158 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
158 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
159 dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
159 dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
160 rev1b = '@%d' % repo[node1b].rev()
160 rev1b = '@%d' % repo[node1b].rev()
161 else:
161 else:
162 dir1b = None
162 dir1b = None
163 rev1b = ''
163 rev1b = ''
164
164
165 fns_and_mtime = []
165 fns_and_mtime = []
166
166
167 # If node2 in not the wc or there is >1 change, copy it
167 # If node2 in not the wc or there is >1 change, copy it
168 dir2root = ''
168 dir2root = ''
169 rev2 = ''
169 rev2 = ''
170 if node2:
170 if node2:
171 dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
171 dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
172 rev2 = '@%d' % repo[node2].rev()
172 rev2 = '@%d' % repo[node2].rev()
173 elif len(common) > 1:
173 elif len(common) > 1:
174 #we only actually need to get the files to copy back to
174 #we only actually need to get the files to copy back to
175 #the working dir in this case (because the other cases
175 #the working dir in this case (because the other cases
176 #are: diffing 2 revisions or single file -- in which case
176 #are: diffing 2 revisions or single file -- in which case
177 #the file is already directly passed to the diff tool).
177 #the file is already directly passed to the diff tool).
178 dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
178 dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
179 else:
179 else:
180 # This lets the diff tool open the changed file directly
180 # This lets the diff tool open the changed file directly
181 dir2 = ''
181 dir2 = ''
182 dir2root = repo.root
182 dir2root = repo.root
183
183
184 label1a = rev1a
184 label1a = rev1a
185 label1b = rev1b
185 label1b = rev1b
186 label2 = rev2
186 label2 = rev2
187
187
188 # If only one change, diff the files instead of the directories
188 # If only one change, diff the files instead of the directories
189 # Handle bogus modifies correctly by checking if the files exist
189 # Handle bogus modifies correctly by checking if the files exist
190 if len(common) == 1:
190 if len(common) == 1:
191 common_file = util.localpath(common.pop())
191 common_file = util.localpath(common.pop())
192 dir1a = os.path.join(tmproot, dir1a, common_file)
192 dir1a = os.path.join(tmproot, dir1a, common_file)
193 label1a = common_file + rev1a
193 label1a = common_file + rev1a
194 if not os.path.isfile(dir1a):
194 if not os.path.isfile(dir1a):
195 dir1a = os.devnull
195 dir1a = os.devnull
196 if do3way:
196 if do3way:
197 dir1b = os.path.join(tmproot, dir1b, common_file)
197 dir1b = os.path.join(tmproot, dir1b, common_file)
198 label1b = common_file + rev1b
198 label1b = common_file + rev1b
199 if not os.path.isfile(dir1b):
199 if not os.path.isfile(dir1b):
200 dir1b = os.devnull
200 dir1b = os.devnull
201 dir2 = os.path.join(dir2root, dir2, common_file)
201 dir2 = os.path.join(dir2root, dir2, common_file)
202 label2 = common_file + rev2
202 label2 = common_file + rev2
203
203
204 # Function to quote file/dir names in the argument string.
204 # Function to quote file/dir names in the argument string.
205 # When not operating in 3-way mode, an empty string is
205 # When not operating in 3-way mode, an empty string is
206 # returned for parent2
206 # returned for parent2
207 replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
207 replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
208 plabel1=label1a, plabel2=label1b,
208 plabel1=label1a, plabel2=label1b,
209 clabel=label2, child=dir2,
209 clabel=label2, child=dir2,
210 root=repo.root)
210 root=repo.root)
211 def quote(match):
211 def quote(match):
212 key = match.group()[1:]
212 key = match.group()[1:]
213 if not do3way and key == 'parent2':
213 if not do3way and key == 'parent2':
214 return ''
214 return ''
215 return util.shellquote(replace[key])
215 return util.shellquote(replace[key])
216
216
217 # Match parent2 first, so 'parent1?' will match both parent1 and parent
217 # Match parent2 first, so 'parent1?' will match both parent1 and parent
218 regex = '\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)'
218 regex = '\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)'
219 if not do3way and not re.search(regex, args):
219 if not do3way and not re.search(regex, args):
220 args += ' $parent1 $child'
220 args += ' $parent1 $child'
221 args = re.sub(regex, quote, args)
221 args = re.sub(regex, quote, args)
222 cmdline = util.shellquote(diffcmd) + ' ' + args
222 cmdline = util.shellquote(diffcmd) + ' ' + args
223
223
224 ui.debug('running %r in %s\n' % (cmdline, tmproot))
224 ui.debug('running %r in %s\n' % (cmdline, tmproot))
225 util.system(cmdline, cwd=tmproot)
225 util.system(cmdline, cwd=tmproot)
226
226
227 for copy_fn, working_fn, mtime in fns_and_mtime:
227 for copy_fn, working_fn, mtime in fns_and_mtime:
228 if os.lstat(copy_fn).st_mtime != mtime:
228 if os.lstat(copy_fn).st_mtime != mtime:
229 ui.debug('file changed while diffing. '
229 ui.debug('file changed while diffing. '
230 'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
230 'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
231 util.copyfile(copy_fn, working_fn)
231 util.copyfile(copy_fn, working_fn)
232
232
233 return 1
233 return 1
234 finally:
234 finally:
235 ui.note(_('cleaning up temp directory\n'))
235 ui.note(_('cleaning up temp directory\n'))
236 shutil.rmtree(tmproot)
236 shutil.rmtree(tmproot)
237
237
238 def extdiff(ui, repo, *pats, **opts):
238 def extdiff(ui, repo, *pats, **opts):
239 '''use external program to diff repository (or selected files)
239 '''use external program to diff repository (or selected files)
240
240
241 Show differences between revisions for the specified files, using
241 Show differences between revisions for the specified files, using
242 an external program. The default program used is diff, with
242 an external program. The default program used is diff, with
243 default options "-Npru".
243 default options "-Npru".
244
244
245 To select a different program, use the -p/--program option. The
245 To select a different program, use the -p/--program option. The
246 program will be passed the names of two directories to compare. To
246 program will be passed the names of two directories to compare. To
247 pass additional options to the program, use -o/--option. These
247 pass additional options to the program, use -o/--option. These
248 will be passed before the names of the directories to compare.
248 will be passed before the names of the directories to compare.
249
249
250 When two revision arguments are given, then changes are shown
250 When two revision arguments are given, then changes are shown
251 between those revisions. If only one revision is specified then
251 between those revisions. If only one revision is specified then
252 that revision is compared to the working directory, and, when no
252 that revision is compared to the working directory, and, when no
253 revisions are specified, the working directory files are compared
253 revisions are specified, the working directory files are compared
254 to its parent.'''
254 to its parent.'''
255 program = opts.get('program')
255 program = opts.get('program')
256 option = opts.get('option')
256 option = opts.get('option')
257 if not program:
257 if not program:
258 program = 'diff'
258 program = 'diff'
259 option = option or ['-Npru']
259 option = option or ['-Npru']
260 return dodiff(ui, repo, program, option, pats, opts)
260 return dodiff(ui, repo, program, option, pats, opts)
261
261
262 cmdtable = {
262 cmdtable = {
263 "extdiff":
263 "extdiff":
264 (extdiff,
264 (extdiff,
265 [('p', 'program', '',
265 [('p', 'program', '',
266 _('comparison program to run'), _('CMD')),
266 _('comparison program to run'), _('CMD')),
267 ('o', 'option', [],
267 ('o', 'option', [],
268 _('pass option to comparison program'), _('OPT')),
268 _('pass option to comparison program'), _('OPT')),
269 ('r', 'rev', [],
269 ('r', 'rev', [],
270 _('revision'), _('REV')),
270 _('revision'), _('REV')),
271 ('c', 'change', '',
271 ('c', 'change', '',
272 _('change made by revision'), _('REV')),
272 _('change made by revision'), _('REV')),
273 ] + commands.walkopts,
273 ] + commands.walkopts,
274 _('hg extdiff [OPT]... [FILE]...')),
274 _('hg extdiff [OPT]... [FILE]...')),
275 }
275 }
276
276
277 def uisetup(ui):
277 def uisetup(ui):
278 for cmd, path in ui.configitems('extdiff'):
278 for cmd, path in ui.configitems('extdiff'):
279 if cmd.startswith('cmd.'):
279 if cmd.startswith('cmd.'):
280 cmd = cmd[4:]
280 cmd = cmd[4:]
281 if not path:
281 if not path:
282 path = cmd
282 path = cmd
283 diffopts = ui.config('extdiff', 'opts.' + cmd, '')
283 diffopts = ui.config('extdiff', 'opts.' + cmd, '')
284 diffopts = diffopts and [diffopts] or []
284 diffopts = diffopts and [diffopts] or []
285 elif cmd.startswith('opts.'):
285 elif cmd.startswith('opts.'):
286 continue
286 continue
287 else:
287 else:
288 # command = path opts
288 # command = path opts
289 if path:
289 if path:
290 diffopts = shlex.split(path)
290 diffopts = shlex.split(path)
291 path = diffopts.pop(0)
291 path = diffopts.pop(0)
292 else:
292 else:
293 path, diffopts = cmd, []
293 path, diffopts = cmd, []
294 # look for diff arguments in [diff-tools] then [merge-tools]
294 # look for diff arguments in [diff-tools] then [merge-tools]
295 if diffopts == []:
295 if diffopts == []:
296 args = ui.config('diff-tools', cmd+'.diffargs') or \
296 args = ui.config('diff-tools', cmd+'.diffargs') or \
297 ui.config('merge-tools', cmd+'.diffargs')
297 ui.config('merge-tools', cmd+'.diffargs')
298 if args:
298 if args:
299 diffopts = shlex.split(args)
299 diffopts = shlex.split(args)
300 def save(cmd, path, diffopts):
300 def save(cmd, path, diffopts):
301 '''use closure to save diff command to use'''
301 '''use closure to save diff command to use'''
302 def mydiff(ui, repo, *pats, **opts):
302 def mydiff(ui, repo, *pats, **opts):
303 return dodiff(ui, repo, path, diffopts + opts['option'],
303 return dodiff(ui, repo, path, diffopts + opts['option'],
304 pats, opts)
304 pats, opts)
305 doc = _('''\
305 doc = _('''\
306 use %(path)s to diff repository (or selected files)
306 use %(path)s to diff repository (or selected files)
307
307
308 Show differences between revisions for the specified files, using
308 Show differences between revisions for the specified files, using
309 the %(path)s program.
309 the %(path)s program.
310
310
311 When two revision arguments are given, then changes are shown
311 When two revision arguments are given, then changes are shown
312 between those revisions. If only one revision is specified then
312 between those revisions. If only one revision is specified then
313 that revision is compared to the working directory, and, when no
313 that revision is compared to the working directory, and, when no
314 revisions are specified, the working directory files are compared
314 revisions are specified, the working directory files are compared
315 to its parent.\
315 to its parent.\
316 ''') % dict(path=util.uirepr(path))
316 ''') % dict(path=util.uirepr(path))
317
317
318 # We must translate the docstring right away since it is
318 # We must translate the docstring right away since it is
319 # used as a format string. The string will unfortunately
319 # used as a format string. The string will unfortunately
320 # be translated again in commands.helpcmd and this will
320 # be translated again in commands.helpcmd and this will
321 # fail when the docstring contains non-ASCII characters.
321 # fail when the docstring contains non-ASCII characters.
322 # Decoding the string to a Unicode string here (using the
322 # Decoding the string to a Unicode string here (using the
323 # right encoding) prevents that.
323 # right encoding) prevents that.
324 mydiff.__doc__ = doc.decode(encoding.encoding)
324 mydiff.__doc__ = doc.decode(encoding.encoding)
325 return mydiff
325 return mydiff
326 cmdtable[cmd] = (save(cmd, path, diffopts),
326 cmdtable[cmd] = (save(cmd, path, diffopts),
327 cmdtable['extdiff'][1][1:],
327 cmdtable['extdiff'][1][1:],
328 _('hg %s [OPTION]... [FILE]...') % cmd)
328 _('hg %s [OPTION]... [FILE]...') % cmd)
@@ -1,1397 +1,1397 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, glob, tempfile
10 import os, sys, errno, re, glob, tempfile
11 import util, scmutil, templater, patch, error, templatekw
11 import util, scmutil, templater, patch, error, templatekw
12 import match as matchmod
12 import match as matchmod
13 import similar, revset, subrepo
13 import similar, revset, subrepo
14
14
15 revrangesep = ':'
15 revrangesep = ':'
16
16
17 def parsealiases(cmd):
17 def parsealiases(cmd):
18 return cmd.lstrip("^").split("|")
18 return cmd.lstrip("^").split("|")
19
19
20 def findpossible(cmd, table, strict=False):
20 def findpossible(cmd, table, strict=False):
21 """
21 """
22 Return cmd -> (aliases, command table entry)
22 Return cmd -> (aliases, command table entry)
23 for each matching command.
23 for each matching command.
24 Return debug commands (or their aliases) only if no normal command matches.
24 Return debug commands (or their aliases) only if no normal command matches.
25 """
25 """
26 choice = {}
26 choice = {}
27 debugchoice = {}
27 debugchoice = {}
28 for e in table.keys():
28 for e in table.keys():
29 aliases = parsealiases(e)
29 aliases = parsealiases(e)
30 found = None
30 found = None
31 if cmd in aliases:
31 if cmd in aliases:
32 found = cmd
32 found = cmd
33 elif not strict:
33 elif not strict:
34 for a in aliases:
34 for a in aliases:
35 if a.startswith(cmd):
35 if a.startswith(cmd):
36 found = a
36 found = a
37 break
37 break
38 if found is not None:
38 if found is not None:
39 if aliases[0].startswith("debug") or found.startswith("debug"):
39 if aliases[0].startswith("debug") or found.startswith("debug"):
40 debugchoice[found] = (aliases, table[e])
40 debugchoice[found] = (aliases, table[e])
41 else:
41 else:
42 choice[found] = (aliases, table[e])
42 choice[found] = (aliases, table[e])
43
43
44 if not choice and debugchoice:
44 if not choice and debugchoice:
45 choice = debugchoice
45 choice = debugchoice
46
46
47 return choice
47 return choice
48
48
49 def findcmd(cmd, table, strict=True):
49 def findcmd(cmd, table, strict=True):
50 """Return (aliases, command table entry) for command string."""
50 """Return (aliases, command table entry) for command string."""
51 choice = findpossible(cmd, table, strict)
51 choice = findpossible(cmd, table, strict)
52
52
53 if cmd in choice:
53 if cmd in choice:
54 return choice[cmd]
54 return choice[cmd]
55
55
56 if len(choice) > 1:
56 if len(choice) > 1:
57 clist = choice.keys()
57 clist = choice.keys()
58 clist.sort()
58 clist.sort()
59 raise error.AmbiguousCommand(cmd, clist)
59 raise error.AmbiguousCommand(cmd, clist)
60
60
61 if choice:
61 if choice:
62 return choice.values()[0]
62 return choice.values()[0]
63
63
64 raise error.UnknownCommand(cmd)
64 raise error.UnknownCommand(cmd)
65
65
66 def findrepo(p):
66 def findrepo(p):
67 while not os.path.isdir(os.path.join(p, ".hg")):
67 while not os.path.isdir(os.path.join(p, ".hg")):
68 oldp, p = p, os.path.dirname(p)
68 oldp, p = p, os.path.dirname(p)
69 if p == oldp:
69 if p == oldp:
70 return None
70 return None
71
71
72 return p
72 return p
73
73
74 def bail_if_changed(repo):
74 def bail_if_changed(repo):
75 if repo.dirstate.p2() != nullid:
75 if repo.dirstate.p2() != nullid:
76 raise util.Abort(_('outstanding uncommitted merge'))
76 raise util.Abort(_('outstanding uncommitted merge'))
77 modified, added, removed, deleted = repo.status()[:4]
77 modified, added, removed, deleted = repo.status()[:4]
78 if modified or added or removed or deleted:
78 if modified or added or removed or deleted:
79 raise util.Abort(_("outstanding uncommitted changes"))
79 raise util.Abort(_("outstanding uncommitted changes"))
80
80
81 def logmessage(opts):
81 def logmessage(opts):
82 """ get the log message according to -m and -l option """
82 """ get the log message according to -m and -l option """
83 message = opts.get('message')
83 message = opts.get('message')
84 logfile = opts.get('logfile')
84 logfile = opts.get('logfile')
85
85
86 if message and logfile:
86 if message and logfile:
87 raise util.Abort(_('options --message and --logfile are mutually '
87 raise util.Abort(_('options --message and --logfile are mutually '
88 'exclusive'))
88 'exclusive'))
89 if not message and logfile:
89 if not message and logfile:
90 try:
90 try:
91 if logfile == '-':
91 if logfile == '-':
92 message = sys.stdin.read()
92 message = sys.stdin.read()
93 else:
93 else:
94 message = util.readfile(logfile)
94 message = util.readfile(logfile)
95 except IOError, inst:
95 except IOError, inst:
96 raise util.Abort(_("can't read commit message '%s': %s") %
96 raise util.Abort(_("can't read commit message '%s': %s") %
97 (logfile, inst.strerror))
97 (logfile, inst.strerror))
98 return message
98 return message
99
99
100 def loglimit(opts):
100 def loglimit(opts):
101 """get the log limit according to option -l/--limit"""
101 """get the log limit according to option -l/--limit"""
102 limit = opts.get('limit')
102 limit = opts.get('limit')
103 if limit:
103 if limit:
104 try:
104 try:
105 limit = int(limit)
105 limit = int(limit)
106 except ValueError:
106 except ValueError:
107 raise util.Abort(_('limit must be a positive integer'))
107 raise util.Abort(_('limit must be a positive integer'))
108 if limit <= 0:
108 if limit <= 0:
109 raise util.Abort(_('limit must be positive'))
109 raise util.Abort(_('limit must be positive'))
110 else:
110 else:
111 limit = None
111 limit = None
112 return limit
112 return limit
113
113
114 def revsingle(repo, revspec, default='.'):
114 def revsingle(repo, revspec, default='.'):
115 if not revspec:
115 if not revspec:
116 return repo[default]
116 return repo[default]
117
117
118 l = revrange(repo, [revspec])
118 l = revrange(repo, [revspec])
119 if len(l) < 1:
119 if len(l) < 1:
120 raise util.Abort(_('empty revision set'))
120 raise util.Abort(_('empty revision set'))
121 return repo[l[-1]]
121 return repo[l[-1]]
122
122
123 def revpair(repo, revs):
123 def revpair(repo, revs):
124 if not revs:
124 if not revs:
125 return repo.dirstate.p1(), None
125 return repo.dirstate.p1(), None
126
126
127 l = revrange(repo, revs)
127 l = revrange(repo, revs)
128
128
129 if len(l) == 0:
129 if len(l) == 0:
130 return repo.dirstate.p1(), None
130 return repo.dirstate.p1(), None
131
131
132 if len(l) == 1:
132 if len(l) == 1:
133 return repo.lookup(l[0]), None
133 return repo.lookup(l[0]), None
134
134
135 return repo.lookup(l[0]), repo.lookup(l[-1])
135 return repo.lookup(l[0]), repo.lookup(l[-1])
136
136
137 def revrange(repo, revs):
137 def revrange(repo, revs):
138 """Yield revision as strings from a list of revision specifications."""
138 """Yield revision as strings from a list of revision specifications."""
139
139
140 def revfix(repo, val, defval):
140 def revfix(repo, val, defval):
141 if not val and val != 0 and defval is not None:
141 if not val and val != 0 and defval is not None:
142 return defval
142 return defval
143 return repo.changelog.rev(repo.lookup(val))
143 return repo.changelog.rev(repo.lookup(val))
144
144
145 seen, l = set(), []
145 seen, l = set(), []
146 for spec in revs:
146 for spec in revs:
147 # attempt to parse old-style ranges first to deal with
147 # attempt to parse old-style ranges first to deal with
148 # things like old-tag which contain query metacharacters
148 # things like old-tag which contain query metacharacters
149 try:
149 try:
150 if isinstance(spec, int):
150 if isinstance(spec, int):
151 seen.add(spec)
151 seen.add(spec)
152 l.append(spec)
152 l.append(spec)
153 continue
153 continue
154
154
155 if revrangesep in spec:
155 if revrangesep in spec:
156 start, end = spec.split(revrangesep, 1)
156 start, end = spec.split(revrangesep, 1)
157 start = revfix(repo, start, 0)
157 start = revfix(repo, start, 0)
158 end = revfix(repo, end, len(repo) - 1)
158 end = revfix(repo, end, len(repo) - 1)
159 step = start > end and -1 or 1
159 step = start > end and -1 or 1
160 for rev in xrange(start, end + step, step):
160 for rev in xrange(start, end + step, step):
161 if rev in seen:
161 if rev in seen:
162 continue
162 continue
163 seen.add(rev)
163 seen.add(rev)
164 l.append(rev)
164 l.append(rev)
165 continue
165 continue
166 elif spec and spec in repo: # single unquoted rev
166 elif spec and spec in repo: # single unquoted rev
167 rev = revfix(repo, spec, None)
167 rev = revfix(repo, spec, None)
168 if rev in seen:
168 if rev in seen:
169 continue
169 continue
170 seen.add(rev)
170 seen.add(rev)
171 l.append(rev)
171 l.append(rev)
172 continue
172 continue
173 except error.RepoLookupError:
173 except error.RepoLookupError:
174 pass
174 pass
175
175
176 # fall through to new-style queries if old-style fails
176 # fall through to new-style queries if old-style fails
177 m = revset.match(repo.ui, spec)
177 m = revset.match(repo.ui, spec)
178 for r in m(repo, range(len(repo))):
178 for r in m(repo, range(len(repo))):
179 if r not in seen:
179 if r not in seen:
180 l.append(r)
180 l.append(r)
181 seen.update(l)
181 seen.update(l)
182
182
183 return l
183 return l
184
184
185 def make_filename(repo, pat, node,
185 def make_filename(repo, pat, node,
186 total=None, seqno=None, revwidth=None, pathname=None):
186 total=None, seqno=None, revwidth=None, pathname=None):
187 node_expander = {
187 node_expander = {
188 'H': lambda: hex(node),
188 'H': lambda: hex(node),
189 'R': lambda: str(repo.changelog.rev(node)),
189 'R': lambda: str(repo.changelog.rev(node)),
190 'h': lambda: short(node),
190 'h': lambda: short(node),
191 }
191 }
192 expander = {
192 expander = {
193 '%': lambda: '%',
193 '%': lambda: '%',
194 'b': lambda: os.path.basename(repo.root),
194 'b': lambda: os.path.basename(repo.root),
195 }
195 }
196
196
197 try:
197 try:
198 if node:
198 if node:
199 expander.update(node_expander)
199 expander.update(node_expander)
200 if node:
200 if node:
201 expander['r'] = (lambda:
201 expander['r'] = (lambda:
202 str(repo.changelog.rev(node)).zfill(revwidth or 0))
202 str(repo.changelog.rev(node)).zfill(revwidth or 0))
203 if total is not None:
203 if total is not None:
204 expander['N'] = lambda: str(total)
204 expander['N'] = lambda: str(total)
205 if seqno is not None:
205 if seqno is not None:
206 expander['n'] = lambda: str(seqno)
206 expander['n'] = lambda: str(seqno)
207 if total is not None and seqno is not None:
207 if total is not None and seqno is not None:
208 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
208 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
209 if pathname is not None:
209 if pathname is not None:
210 expander['s'] = lambda: os.path.basename(pathname)
210 expander['s'] = lambda: os.path.basename(pathname)
211 expander['d'] = lambda: os.path.dirname(pathname) or '.'
211 expander['d'] = lambda: os.path.dirname(pathname) or '.'
212 expander['p'] = lambda: pathname
212 expander['p'] = lambda: pathname
213
213
214 newname = []
214 newname = []
215 patlen = len(pat)
215 patlen = len(pat)
216 i = 0
216 i = 0
217 while i < patlen:
217 while i < patlen:
218 c = pat[i]
218 c = pat[i]
219 if c == '%':
219 if c == '%':
220 i += 1
220 i += 1
221 c = pat[i]
221 c = pat[i]
222 c = expander[c]()
222 c = expander[c]()
223 newname.append(c)
223 newname.append(c)
224 i += 1
224 i += 1
225 return ''.join(newname)
225 return ''.join(newname)
226 except KeyError, inst:
226 except KeyError, inst:
227 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
227 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
228 inst.args[0])
228 inst.args[0])
229
229
230 def make_file(repo, pat, node=None,
230 def make_file(repo, pat, node=None,
231 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
231 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
232
232
233 writable = mode not in ('r', 'rb')
233 writable = mode not in ('r', 'rb')
234
234
235 if not pat or pat == '-':
235 if not pat or pat == '-':
236 fp = writable and sys.stdout or sys.stdin
236 fp = writable and sys.stdout or sys.stdin
237 return os.fdopen(os.dup(fp.fileno()), mode)
237 return os.fdopen(os.dup(fp.fileno()), mode)
238 if hasattr(pat, 'write') and writable:
238 if hasattr(pat, 'write') and writable:
239 return pat
239 return pat
240 if hasattr(pat, 'read') and 'r' in mode:
240 if hasattr(pat, 'read') and 'r' in mode:
241 return pat
241 return pat
242 return open(make_filename(repo, pat, node, total, seqno, revwidth,
242 return open(make_filename(repo, pat, node, total, seqno, revwidth,
243 pathname),
243 pathname),
244 mode)
244 mode)
245
245
246 def expandpats(pats):
246 def expandpats(pats):
247 if not util.expandglobs:
247 if not util.expandglobs:
248 return list(pats)
248 return list(pats)
249 ret = []
249 ret = []
250 for p in pats:
250 for p in pats:
251 kind, name = matchmod._patsplit(p, None)
251 kind, name = matchmod._patsplit(p, None)
252 if kind is None:
252 if kind is None:
253 try:
253 try:
254 globbed = glob.glob(name)
254 globbed = glob.glob(name)
255 except re.error:
255 except re.error:
256 globbed = [name]
256 globbed = [name]
257 if globbed:
257 if globbed:
258 ret.extend(globbed)
258 ret.extend(globbed)
259 continue
259 continue
260 ret.append(p)
260 ret.append(p)
261 return ret
261 return ret
262
262
263 def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
263 def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
264 if pats == ("",):
264 if pats == ("",):
265 pats = []
265 pats = []
266 if not globbed and default == 'relpath':
266 if not globbed and default == 'relpath':
267 pats = expandpats(pats or [])
267 pats = expandpats(pats or [])
268 m = matchmod.match(repo.root, repo.getcwd(), pats,
268 m = matchmod.match(repo.root, repo.getcwd(), pats,
269 opts.get('include'), opts.get('exclude'), default,
269 opts.get('include'), opts.get('exclude'), default,
270 auditor=repo.auditor)
270 auditor=repo.auditor)
271 def badfn(f, msg):
271 def badfn(f, msg):
272 repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
272 repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
273 m.bad = badfn
273 m.bad = badfn
274 return m
274 return m
275
275
276 def matchall(repo):
276 def matchall(repo):
277 return matchmod.always(repo.root, repo.getcwd())
277 return matchmod.always(repo.root, repo.getcwd())
278
278
279 def matchfiles(repo, files):
279 def matchfiles(repo, files):
280 return matchmod.exact(repo.root, repo.getcwd(), files)
280 return matchmod.exact(repo.root, repo.getcwd(), files)
281
281
282 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
282 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
283 if dry_run is None:
283 if dry_run is None:
284 dry_run = opts.get('dry_run')
284 dry_run = opts.get('dry_run')
285 if similarity is None:
285 if similarity is None:
286 similarity = float(opts.get('similarity') or 0)
286 similarity = float(opts.get('similarity') or 0)
287 # we'd use status here, except handling of symlinks and ignore is tricky
287 # we'd use status here, except handling of symlinks and ignore is tricky
288 added, unknown, deleted, removed = [], [], [], []
288 added, unknown, deleted, removed = [], [], [], []
289 audit_path = scmutil.pathauditor(repo.root)
289 audit_path = scmutil.pathauditor(repo.root)
290 m = match(repo, pats, opts)
290 m = match(repo, pats, opts)
291 for abs in repo.walk(m):
291 for abs in repo.walk(m):
292 target = repo.wjoin(abs)
292 target = repo.wjoin(abs)
293 good = True
293 good = True
294 try:
294 try:
295 audit_path(abs)
295 audit_path(abs)
296 except (OSError, util.Abort):
296 except (OSError, util.Abort):
297 good = False
297 good = False
298 rel = m.rel(abs)
298 rel = m.rel(abs)
299 exact = m.exact(abs)
299 exact = m.exact(abs)
300 if good and abs not in repo.dirstate:
300 if good and abs not in repo.dirstate:
301 unknown.append(abs)
301 unknown.append(abs)
302 if repo.ui.verbose or not exact:
302 if repo.ui.verbose or not exact:
303 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
303 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
304 elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
304 elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
305 or (os.path.isdir(target) and not os.path.islink(target))):
305 or (os.path.isdir(target) and not os.path.islink(target))):
306 deleted.append(abs)
306 deleted.append(abs)
307 if repo.ui.verbose or not exact:
307 if repo.ui.verbose or not exact:
308 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
308 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
309 # for finding renames
309 # for finding renames
310 elif repo.dirstate[abs] == 'r':
310 elif repo.dirstate[abs] == 'r':
311 removed.append(abs)
311 removed.append(abs)
312 elif repo.dirstate[abs] == 'a':
312 elif repo.dirstate[abs] == 'a':
313 added.append(abs)
313 added.append(abs)
314 copies = {}
314 copies = {}
315 if similarity > 0:
315 if similarity > 0:
316 for old, new, score in similar.findrenames(repo,
316 for old, new, score in similar.findrenames(repo,
317 added + unknown, removed + deleted, similarity):
317 added + unknown, removed + deleted, similarity):
318 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
318 if repo.ui.verbose or not m.exact(old) or not m.exact(new):
319 repo.ui.status(_('recording removal of %s as rename to %s '
319 repo.ui.status(_('recording removal of %s as rename to %s '
320 '(%d%% similar)\n') %
320 '(%d%% similar)\n') %
321 (m.rel(old), m.rel(new), score * 100))
321 (m.rel(old), m.rel(new), score * 100))
322 copies[new] = old
322 copies[new] = old
323
323
324 if not dry_run:
324 if not dry_run:
325 wctx = repo[None]
325 wctx = repo[None]
326 wlock = repo.wlock()
326 wlock = repo.wlock()
327 try:
327 try:
328 wctx.remove(deleted)
328 wctx.remove(deleted)
329 wctx.add(unknown)
329 wctx.add(unknown)
330 for new, old in copies.iteritems():
330 for new, old in copies.iteritems():
331 wctx.copy(old, new)
331 wctx.copy(old, new)
332 finally:
332 finally:
333 wlock.release()
333 wlock.release()
334
334
335 def updatedir(ui, repo, patches, similarity=0):
335 def updatedir(ui, repo, patches, similarity=0):
336 '''Update dirstate after patch application according to metadata'''
336 '''Update dirstate after patch application according to metadata'''
337 if not patches:
337 if not patches:
338 return
338 return
339 copies = []
339 copies = []
340 removes = set()
340 removes = set()
341 cfiles = patches.keys()
341 cfiles = patches.keys()
342 cwd = repo.getcwd()
342 cwd = repo.getcwd()
343 if cwd:
343 if cwd:
344 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
344 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
345 for f in patches:
345 for f in patches:
346 gp = patches[f]
346 gp = patches[f]
347 if not gp:
347 if not gp:
348 continue
348 continue
349 if gp.op == 'RENAME':
349 if gp.op == 'RENAME':
350 copies.append((gp.oldpath, gp.path))
350 copies.append((gp.oldpath, gp.path))
351 removes.add(gp.oldpath)
351 removes.add(gp.oldpath)
352 elif gp.op == 'COPY':
352 elif gp.op == 'COPY':
353 copies.append((gp.oldpath, gp.path))
353 copies.append((gp.oldpath, gp.path))
354 elif gp.op == 'DELETE':
354 elif gp.op == 'DELETE':
355 removes.add(gp.path)
355 removes.add(gp.path)
356
356
357 wctx = repo[None]
357 wctx = repo[None]
358 for src, dst in copies:
358 for src, dst in copies:
359 dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
359 dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
360 if (not similarity) and removes:
360 if (not similarity) and removes:
361 wctx.remove(sorted(removes), True)
361 wctx.remove(sorted(removes), True)
362
362
363 for f in patches:
363 for f in patches:
364 gp = patches[f]
364 gp = patches[f]
365 if gp and gp.mode:
365 if gp and gp.mode:
366 islink, isexec = gp.mode
366 islink, isexec = gp.mode
367 dst = repo.wjoin(gp.path)
367 dst = repo.wjoin(gp.path)
368 # patch won't create empty files
368 # patch won't create empty files
369 if gp.op == 'ADD' and not os.path.lexists(dst):
369 if gp.op == 'ADD' and not os.path.lexists(dst):
370 flags = (isexec and 'x' or '') + (islink and 'l' or '')
370 flags = (isexec and 'x' or '') + (islink and 'l' or '')
371 repo.wwrite(gp.path, '', flags)
371 repo.wwrite(gp.path, '', flags)
372 util.set_flags(dst, islink, isexec)
372 util.setflags(dst, islink, isexec)
373 addremove(repo, cfiles, similarity=similarity)
373 addremove(repo, cfiles, similarity=similarity)
374 files = patches.keys()
374 files = patches.keys()
375 files.extend([r for r in removes if r not in files])
375 files.extend([r for r in removes if r not in files])
376 return sorted(files)
376 return sorted(files)
377
377
378 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
378 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
379 """Update the dirstate to reflect the intent of copying src to dst. For
379 """Update the dirstate to reflect the intent of copying src to dst. For
380 different reasons it might not end with dst being marked as copied from src.
380 different reasons it might not end with dst being marked as copied from src.
381 """
381 """
382 origsrc = repo.dirstate.copied(src) or src
382 origsrc = repo.dirstate.copied(src) or src
383 if dst == origsrc: # copying back a copy?
383 if dst == origsrc: # copying back a copy?
384 if repo.dirstate[dst] not in 'mn' and not dryrun:
384 if repo.dirstate[dst] not in 'mn' and not dryrun:
385 repo.dirstate.normallookup(dst)
385 repo.dirstate.normallookup(dst)
386 else:
386 else:
387 if repo.dirstate[origsrc] == 'a' and origsrc == src:
387 if repo.dirstate[origsrc] == 'a' and origsrc == src:
388 if not ui.quiet:
388 if not ui.quiet:
389 ui.warn(_("%s has not been committed yet, so no copy "
389 ui.warn(_("%s has not been committed yet, so no copy "
390 "data will be stored for %s.\n")
390 "data will be stored for %s.\n")
391 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
391 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
392 if repo.dirstate[dst] in '?r' and not dryrun:
392 if repo.dirstate[dst] in '?r' and not dryrun:
393 wctx.add([dst])
393 wctx.add([dst])
394 elif not dryrun:
394 elif not dryrun:
395 wctx.copy(origsrc, dst)
395 wctx.copy(origsrc, dst)
396
396
397 def copy(ui, repo, pats, opts, rename=False):
397 def copy(ui, repo, pats, opts, rename=False):
398 # called with the repo lock held
398 # called with the repo lock held
399 #
399 #
400 # hgsep => pathname that uses "/" to separate directories
400 # hgsep => pathname that uses "/" to separate directories
401 # ossep => pathname that uses os.sep to separate directories
401 # ossep => pathname that uses os.sep to separate directories
402 cwd = repo.getcwd()
402 cwd = repo.getcwd()
403 targets = {}
403 targets = {}
404 after = opts.get("after")
404 after = opts.get("after")
405 dryrun = opts.get("dry_run")
405 dryrun = opts.get("dry_run")
406 wctx = repo[None]
406 wctx = repo[None]
407
407
408 def walkpat(pat):
408 def walkpat(pat):
409 srcs = []
409 srcs = []
410 badstates = after and '?' or '?r'
410 badstates = after and '?' or '?r'
411 m = match(repo, [pat], opts, globbed=True)
411 m = match(repo, [pat], opts, globbed=True)
412 for abs in repo.walk(m):
412 for abs in repo.walk(m):
413 state = repo.dirstate[abs]
413 state = repo.dirstate[abs]
414 rel = m.rel(abs)
414 rel = m.rel(abs)
415 exact = m.exact(abs)
415 exact = m.exact(abs)
416 if state in badstates:
416 if state in badstates:
417 if exact and state == '?':
417 if exact and state == '?':
418 ui.warn(_('%s: not copying - file is not managed\n') % rel)
418 ui.warn(_('%s: not copying - file is not managed\n') % rel)
419 if exact and state == 'r':
419 if exact and state == 'r':
420 ui.warn(_('%s: not copying - file has been marked for'
420 ui.warn(_('%s: not copying - file has been marked for'
421 ' remove\n') % rel)
421 ' remove\n') % rel)
422 continue
422 continue
423 # abs: hgsep
423 # abs: hgsep
424 # rel: ossep
424 # rel: ossep
425 srcs.append((abs, rel, exact))
425 srcs.append((abs, rel, exact))
426 return srcs
426 return srcs
427
427
428 # abssrc: hgsep
428 # abssrc: hgsep
429 # relsrc: ossep
429 # relsrc: ossep
430 # otarget: ossep
430 # otarget: ossep
431 def copyfile(abssrc, relsrc, otarget, exact):
431 def copyfile(abssrc, relsrc, otarget, exact):
432 abstarget = scmutil.canonpath(repo.root, cwd, otarget)
432 abstarget = scmutil.canonpath(repo.root, cwd, otarget)
433 reltarget = repo.pathto(abstarget, cwd)
433 reltarget = repo.pathto(abstarget, cwd)
434 target = repo.wjoin(abstarget)
434 target = repo.wjoin(abstarget)
435 src = repo.wjoin(abssrc)
435 src = repo.wjoin(abssrc)
436 state = repo.dirstate[abstarget]
436 state = repo.dirstate[abstarget]
437
437
438 scmutil.checkportable(ui, abstarget)
438 scmutil.checkportable(ui, abstarget)
439
439
440 # check for collisions
440 # check for collisions
441 prevsrc = targets.get(abstarget)
441 prevsrc = targets.get(abstarget)
442 if prevsrc is not None:
442 if prevsrc is not None:
443 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
443 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
444 (reltarget, repo.pathto(abssrc, cwd),
444 (reltarget, repo.pathto(abssrc, cwd),
445 repo.pathto(prevsrc, cwd)))
445 repo.pathto(prevsrc, cwd)))
446 return
446 return
447
447
448 # check for overwrites
448 # check for overwrites
449 exists = os.path.lexists(target)
449 exists = os.path.lexists(target)
450 if not after and exists or after and state in 'mn':
450 if not after and exists or after and state in 'mn':
451 if not opts['force']:
451 if not opts['force']:
452 ui.warn(_('%s: not overwriting - file exists\n') %
452 ui.warn(_('%s: not overwriting - file exists\n') %
453 reltarget)
453 reltarget)
454 return
454 return
455
455
456 if after:
456 if after:
457 if not exists:
457 if not exists:
458 if rename:
458 if rename:
459 ui.warn(_('%s: not recording move - %s does not exist\n') %
459 ui.warn(_('%s: not recording move - %s does not exist\n') %
460 (relsrc, reltarget))
460 (relsrc, reltarget))
461 else:
461 else:
462 ui.warn(_('%s: not recording copy - %s does not exist\n') %
462 ui.warn(_('%s: not recording copy - %s does not exist\n') %
463 (relsrc, reltarget))
463 (relsrc, reltarget))
464 return
464 return
465 elif not dryrun:
465 elif not dryrun:
466 try:
466 try:
467 if exists:
467 if exists:
468 os.unlink(target)
468 os.unlink(target)
469 targetdir = os.path.dirname(target) or '.'
469 targetdir = os.path.dirname(target) or '.'
470 if not os.path.isdir(targetdir):
470 if not os.path.isdir(targetdir):
471 os.makedirs(targetdir)
471 os.makedirs(targetdir)
472 util.copyfile(src, target)
472 util.copyfile(src, target)
473 except IOError, inst:
473 except IOError, inst:
474 if inst.errno == errno.ENOENT:
474 if inst.errno == errno.ENOENT:
475 ui.warn(_('%s: deleted in working copy\n') % relsrc)
475 ui.warn(_('%s: deleted in working copy\n') % relsrc)
476 else:
476 else:
477 ui.warn(_('%s: cannot copy - %s\n') %
477 ui.warn(_('%s: cannot copy - %s\n') %
478 (relsrc, inst.strerror))
478 (relsrc, inst.strerror))
479 return True # report a failure
479 return True # report a failure
480
480
481 if ui.verbose or not exact:
481 if ui.verbose or not exact:
482 if rename:
482 if rename:
483 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
483 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
484 else:
484 else:
485 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
485 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
486
486
487 targets[abstarget] = abssrc
487 targets[abstarget] = abssrc
488
488
489 # fix up dirstate
489 # fix up dirstate
490 dirstatecopy(ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd)
490 dirstatecopy(ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd)
491 if rename and not dryrun:
491 if rename and not dryrun:
492 wctx.remove([abssrc], not after)
492 wctx.remove([abssrc], not after)
493
493
494 # pat: ossep
494 # pat: ossep
495 # dest ossep
495 # dest ossep
496 # srcs: list of (hgsep, hgsep, ossep, bool)
496 # srcs: list of (hgsep, hgsep, ossep, bool)
497 # return: function that takes hgsep and returns ossep
497 # return: function that takes hgsep and returns ossep
498 def targetpathfn(pat, dest, srcs):
498 def targetpathfn(pat, dest, srcs):
499 if os.path.isdir(pat):
499 if os.path.isdir(pat):
500 abspfx = scmutil.canonpath(repo.root, cwd, pat)
500 abspfx = scmutil.canonpath(repo.root, cwd, pat)
501 abspfx = util.localpath(abspfx)
501 abspfx = util.localpath(abspfx)
502 if destdirexists:
502 if destdirexists:
503 striplen = len(os.path.split(abspfx)[0])
503 striplen = len(os.path.split(abspfx)[0])
504 else:
504 else:
505 striplen = len(abspfx)
505 striplen = len(abspfx)
506 if striplen:
506 if striplen:
507 striplen += len(os.sep)
507 striplen += len(os.sep)
508 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
508 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
509 elif destdirexists:
509 elif destdirexists:
510 res = lambda p: os.path.join(dest,
510 res = lambda p: os.path.join(dest,
511 os.path.basename(util.localpath(p)))
511 os.path.basename(util.localpath(p)))
512 else:
512 else:
513 res = lambda p: dest
513 res = lambda p: dest
514 return res
514 return res
515
515
516 # pat: ossep
516 # pat: ossep
517 # dest ossep
517 # dest ossep
518 # srcs: list of (hgsep, hgsep, ossep, bool)
518 # srcs: list of (hgsep, hgsep, ossep, bool)
519 # return: function that takes hgsep and returns ossep
519 # return: function that takes hgsep and returns ossep
def targetpathafterfn(pat, dest, srcs):
    """Build a function mapping a source path (hgsep) to its target (ossep).

    Variant used for --after: targets are expected to exist already, so
    directory copies pick the strip length that matches the most existing
    files under dest.
    srcs is a list of (hgsep, hgsep, ossep, bool) tuples.
    """
    if matchmod.patkind(pat):
        # pattern source: flatten into dest using each file's basename
        def res(p):
            return os.path.join(dest, os.path.basename(util.localpath(p)))
        return res

    abspfx = scmutil.canonpath(repo.root, cwd, pat)
    if len(abspfx) >= len(srcs[0][0]):
        # pat named a single file
        if destdirexists:
            def res(p):
                return os.path.join(dest,
                                    os.path.basename(util.localpath(p)))
        else:
            def res(p):
                return dest
        return res

    # A directory. Either the target path contains the last component of
    # the source path or it does not; score both candidates by how many
    # stripped sources already exist under dest.
    def evalpath(striplen):
        hits = 0
        for src in srcs:
            candidate = os.path.join(dest,
                                     util.localpath(src[0])[striplen:])
            if os.path.lexists(candidate):
                hits += 1
        return hits

    abspfx = util.localpath(abspfx)
    striplen = len(abspfx)
    if striplen:
        striplen += len(os.sep)
    if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
        score = evalpath(striplen)
        striplen1 = len(os.path.split(abspfx)[0])
        if striplen1:
            striplen1 += len(os.sep)
        if evalpath(striplen1) > score:
            striplen = striplen1

    def res(p):
        return os.path.join(dest, util.localpath(p)[striplen:])
    return res
559
559
560
560
561 pats = expandpats(pats)
561 pats = expandpats(pats)
562 if not pats:
562 if not pats:
563 raise util.Abort(_('no source or destination specified'))
563 raise util.Abort(_('no source or destination specified'))
564 if len(pats) == 1:
564 if len(pats) == 1:
565 raise util.Abort(_('no destination specified'))
565 raise util.Abort(_('no destination specified'))
566 dest = pats.pop()
566 dest = pats.pop()
567 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
567 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
568 if not destdirexists:
568 if not destdirexists:
569 if len(pats) > 1 or matchmod.patkind(pats[0]):
569 if len(pats) > 1 or matchmod.patkind(pats[0]):
570 raise util.Abort(_('with multiple sources, destination must be an '
570 raise util.Abort(_('with multiple sources, destination must be an '
571 'existing directory'))
571 'existing directory'))
572 if util.endswithsep(dest):
572 if util.endswithsep(dest):
573 raise util.Abort(_('destination %s is not a directory') % dest)
573 raise util.Abort(_('destination %s is not a directory') % dest)
574
574
575 tfn = targetpathfn
575 tfn = targetpathfn
576 if after:
576 if after:
577 tfn = targetpathafterfn
577 tfn = targetpathafterfn
578 copylist = []
578 copylist = []
579 for pat in pats:
579 for pat in pats:
580 srcs = walkpat(pat)
580 srcs = walkpat(pat)
581 if not srcs:
581 if not srcs:
582 continue
582 continue
583 copylist.append((tfn(pat, dest, srcs), srcs))
583 copylist.append((tfn(pat, dest, srcs), srcs))
584 if not copylist:
584 if not copylist:
585 raise util.Abort(_('no files to copy'))
585 raise util.Abort(_('no files to copy'))
586
586
587 errors = 0
587 errors = 0
588 for targetpath, srcs in copylist:
588 for targetpath, srcs in copylist:
589 for abssrc, relsrc, exact in srcs:
589 for abssrc, relsrc, exact in srcs:
590 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
590 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
591 errors += 1
591 errors += 1
592
592
593 if errors:
593 if errors:
594 ui.warn(_('(consider using --after)\n'))
594 ui.warn(_('(consider using --after)\n'))
595
595
596 return errors != 0
596 return errors != 0
597
597
598 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
598 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
599 runargs=None, appendpid=False):
599 runargs=None, appendpid=False):
600 '''Run a command as a service.'''
600 '''Run a command as a service.'''
601
601
602 if opts['daemon'] and not opts['daemon_pipefds']:
602 if opts['daemon'] and not opts['daemon_pipefds']:
603 # Signal child process startup with file removal
603 # Signal child process startup with file removal
604 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
604 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
605 os.close(lockfd)
605 os.close(lockfd)
606 try:
606 try:
607 if not runargs:
607 if not runargs:
608 runargs = util.hgcmd() + sys.argv[1:]
608 runargs = util.hgcmd() + sys.argv[1:]
609 runargs.append('--daemon-pipefds=%s' % lockpath)
609 runargs.append('--daemon-pipefds=%s' % lockpath)
610 # Don't pass --cwd to the child process, because we've already
610 # Don't pass --cwd to the child process, because we've already
611 # changed directory.
611 # changed directory.
612 for i in xrange(1, len(runargs)):
612 for i in xrange(1, len(runargs)):
613 if runargs[i].startswith('--cwd='):
613 if runargs[i].startswith('--cwd='):
614 del runargs[i]
614 del runargs[i]
615 break
615 break
616 elif runargs[i].startswith('--cwd'):
616 elif runargs[i].startswith('--cwd'):
617 del runargs[i:i + 2]
617 del runargs[i:i + 2]
618 break
618 break
619 def condfn():
619 def condfn():
620 return not os.path.exists(lockpath)
620 return not os.path.exists(lockpath)
621 pid = util.rundetached(runargs, condfn)
621 pid = util.rundetached(runargs, condfn)
622 if pid < 0:
622 if pid < 0:
623 raise util.Abort(_('child process failed to start'))
623 raise util.Abort(_('child process failed to start'))
624 finally:
624 finally:
625 try:
625 try:
626 os.unlink(lockpath)
626 os.unlink(lockpath)
627 except OSError, e:
627 except OSError, e:
628 if e.errno != errno.ENOENT:
628 if e.errno != errno.ENOENT:
629 raise
629 raise
630 if parentfn:
630 if parentfn:
631 return parentfn(pid)
631 return parentfn(pid)
632 else:
632 else:
633 return
633 return
634
634
635 if initfn:
635 if initfn:
636 initfn()
636 initfn()
637
637
638 if opts['pid_file']:
638 if opts['pid_file']:
639 mode = appendpid and 'a' or 'w'
639 mode = appendpid and 'a' or 'w'
640 fp = open(opts['pid_file'], mode)
640 fp = open(opts['pid_file'], mode)
641 fp.write(str(os.getpid()) + '\n')
641 fp.write(str(os.getpid()) + '\n')
642 fp.close()
642 fp.close()
643
643
644 if opts['daemon_pipefds']:
644 if opts['daemon_pipefds']:
645 lockpath = opts['daemon_pipefds']
645 lockpath = opts['daemon_pipefds']
646 try:
646 try:
647 os.setsid()
647 os.setsid()
648 except AttributeError:
648 except AttributeError:
649 pass
649 pass
650 os.unlink(lockpath)
650 os.unlink(lockpath)
651 util.hidewindow()
651 util.hidewindow()
652 sys.stdout.flush()
652 sys.stdout.flush()
653 sys.stderr.flush()
653 sys.stderr.flush()
654
654
655 nullfd = os.open(util.nulldev, os.O_RDWR)
655 nullfd = os.open(util.nulldev, os.O_RDWR)
656 logfilefd = nullfd
656 logfilefd = nullfd
657 if logfile:
657 if logfile:
658 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
658 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
659 os.dup2(nullfd, 0)
659 os.dup2(nullfd, 0)
660 os.dup2(logfilefd, 1)
660 os.dup2(logfilefd, 1)
661 os.dup2(logfilefd, 2)
661 os.dup2(logfilefd, 2)
662 if nullfd not in (0, 1, 2):
662 if nullfd not in (0, 1, 2):
663 os.close(nullfd)
663 os.close(nullfd)
664 if logfile and logfilefd not in (0, 1, 2):
664 if logfile and logfilefd not in (0, 1, 2):
665 os.close(logfilefd)
665 os.close(logfilefd)
666
666
667 if runfn:
667 if runfn:
668 return runfn()
668 return runfn()
669
669
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    Writes one patch per revision in revs. When fp is given, every patch
    is appended to that file object; otherwise a per-revision file is
    created from template via make_file. switch_parent diffs a merge
    against its second parent instead of the first. opts are diff options
    forwarded to patch.diff(). Returns None.
    '''

    # Accept any iterable and tolerate an empty selection: previously
    # max() below raised ValueError on an empty sequence.
    revs = list(revs)
    if not revs:
        return
    total = len(revs)
    # widest revision-number width, passed to make_file for file naming
    revwidth = max([len(str(rev)) for rev in revs])

    def single(rev, seqno, fp):
        # Emit one changeset: header fields, description, then the diff
        # against its (possibly switched) parent.
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()
        prev = (parents and parents[0]) or nullid

        shouldclose = False
        if not fp:
            fp = make_file(repo, template, node, total=total, seqno=seqno,
                           revwidth=revwidth, mode='ab')
            if fp != template:
                shouldclose = True
            if fp != sys.stdout and hasattr(fp, 'name'):
                repo.ui.note("%s\n" % fp.name)

        fp.write("# HG changeset patch\n")
        fp.write("# User %s\n" % ctx.user())
        fp.write("# Date %d %d\n" % ctx.date())
        if branch and branch != 'default':
            # the default branch is implicit in the patch header
            fp.write("# Branch %s\n" % branch)
        fp.write("# Node ID %s\n" % hex(node))
        fp.write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            fp.write("# Parent %s\n" % hex(parents[1]))
        fp.write(ctx.description().rstrip())
        fp.write("\n\n")

        for chunk in patch.diff(repo, prev, node, opts=opts):
            fp.write(chunk)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
715
715
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   listsubrepos=False):
    '''show diff or diffstat.'''
    # route output through fp when one is given, otherwise through the ui
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            # drop label and other ui-only keyword hints
            fp.write(s)

    if stat:
        # diffstat never needs context lines
        diffopts = diffopts.copy(context=0)
        width = ui.plain() and 80 or ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            if node2 is not None:
                node2 = ctx2.substate[subpath][1]
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(diffopts, node2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
751
751
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.patch = patch
        self.diffopts = diffopts
        # buffered-mode state: rendered text keyed by revision number
        self.header = {}
        self.hunk = {}
        # last header written, so identical headers are not repeated
        self.lastheader = None
        self.footer = None

    def flush(self, rev):
        """Emit buffered output for rev; return 1 if a hunk was written."""
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev not in self.hunk:
            return 0
        self.ui.write(self.hunk[rev])
        del self.hunk[rev]
        return 1

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        """Display ctx, capturing the output into self.hunk when buffered."""
        if not self.buffered:
            self._show(ctx, copies, matchfn, props)
            return
        self.ui.pushbuffer()
        self._show(ctx, copies, matchfn, props)
        self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        write = self.ui.write
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            write("%d:%s\n" % (rev, short(changenode)),
                  label='log.node')
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        # full hashes with --debug, short ones otherwise
        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
              label='log.changeset')

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            write(_("branch: %s\n") % branch,
                  label='log.branch')
        for bookmark in self.repo.nodebookmarks(changenode):
            write(_("bookmark: %s\n") % bookmark,
                  label='log.bookmark')
        for tag in self.repo.nodetags(changenode):
            write(_("tag: %s\n") % tag,
                  label='log.tag')
        for parent in parents:
            write(_("parent: %d:%s\n") % parent,
                  label='log.parent')

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            write(_("manifest: %d:%s\n") %
                  (self.repo.manifest.rev(mnode), hex(mnode)),
                  label='ui.debug log.manifest')
        write(_("user: %s\n") % ctx.user(),
              label='log.user')
        write(_("date: %s\n") % date,
              label='log.date')

        if self.ui.debugflag:
            # modified/added/removed relative to the first parent
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    write("%-12s %s\n" % (key, " ".join(value)),
                          label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            write(_("files: %s\n") % " ".join(ctx.files()),
                  label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            write(_("copies: %s\n") % ' '.join(copies),
                  label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                write(_("extra: %s=%s\n")
                      % (key, value.encode('string_escape')),
                      label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                write(_("description:\n"),
                      label='ui.note log.description')
                write(description,
                      label='ui.note log.description')
                write("\n\n")
            else:
                # first line only in non-verbose mode
                write(_("summary: %s\n") %
                      description.splitlines()[0],
                      label='log.summary')
        write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        # render diffstat and/or patch for node against its first parent,
        # when the user asked for either
        if not matchfn:
            matchfn = self.patch
        if not matchfn:
            return
        stat = self.diffopts.get('stat')
        diff = self.diffopts.get('patch')
        diffopts = patch.diffopts(self.ui, self.diffopts)
        prev = self.repo.changelog.parents(node)[0]
        if stat:
            diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                           match=matchfn, stat=True)
        if diff:
            if stat:
                # blank line between the stat and the patch
                self.ui.write("\n")
            diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                           match=matchfn, stat=False)
        self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if self.ui.debugflag or parents[1] != nullrev:
            return parents
        if parents[0] >= rev - 1:
            return []
        return [parents[0]]
907
907
908
908
909 class changeset_templater(changeset_printer):
909 class changeset_templater(changeset_printer):
910 '''format changeset information.'''
910 '''format changeset information.'''
911
911
912 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
912 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
913 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
913 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
914 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
914 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
915 defaulttempl = {
915 defaulttempl = {
916 'parent': '{rev}:{node|formatnode} ',
916 'parent': '{rev}:{node|formatnode} ',
917 'manifest': '{rev}:{node|formatnode}',
917 'manifest': '{rev}:{node|formatnode}',
918 'file_copy': '{name} ({source})',
918 'file_copy': '{name} ({source})',
919 'extra': '{key}={value|stringescape}'
919 'extra': '{key}={value|stringescape}'
920 }
920 }
921 # filecopy is preserved for compatibility reasons
921 # filecopy is preserved for compatibility reasons
922 defaulttempl['filecopy'] = defaulttempl['file_copy']
922 defaulttempl['filecopy'] = defaulttempl['file_copy']
923 self.t = templater.templater(mapfile, {'formatnode': formatnode},
923 self.t = templater.templater(mapfile, {'formatnode': formatnode},
924 cache=defaulttempl)
924 cache=defaulttempl)
925 self.cache = {}
925 self.cache = {}
926
926
927 def use_template(self, t):
927 def use_template(self, t):
928 '''set template string to use'''
928 '''set template string to use'''
929 self.t.cache['changeset'] = t
929 self.t.cache['changeset'] = t
930
930
931 def _meaningful_parentrevs(self, ctx):
931 def _meaningful_parentrevs(self, ctx):
932 """Return list of meaningful (or all if debug) parentrevs for rev.
932 """Return list of meaningful (or all if debug) parentrevs for rev.
933 """
933 """
934 parents = ctx.parents()
934 parents = ctx.parents()
935 if len(parents) > 1:
935 if len(parents) > 1:
936 return parents
936 return parents
937 if self.ui.debugflag:
937 if self.ui.debugflag:
938 return [parents[0], self.repo['null']]
938 return [parents[0], self.repo['null']]
939 if parents[0].rev() >= ctx.rev() - 1:
939 if parents[0].rev() >= ctx.rev() - 1:
940 return []
940 return []
941 return parents
941 return parents
942
942
943 def _show(self, ctx, copies, matchfn, props):
943 def _show(self, ctx, copies, matchfn, props):
944 '''show a single changeset or file revision'''
944 '''show a single changeset or file revision'''
945
945
946 showlist = templatekw.showlist
946 showlist = templatekw.showlist
947
947
948 # showparents() behaviour depends on ui trace level which
948 # showparents() behaviour depends on ui trace level which
949 # causes unexpected behaviours at templating level and makes
949 # causes unexpected behaviours at templating level and makes
950 # it harder to extract it in a standalone function. Its
950 # it harder to extract it in a standalone function. Its
951 # behaviour cannot be changed so leave it here for now.
951 # behaviour cannot be changed so leave it here for now.
952 def showparents(**args):
952 def showparents(**args):
953 ctx = args['ctx']
953 ctx = args['ctx']
954 parents = [[('rev', p.rev()), ('node', p.hex())]
954 parents = [[('rev', p.rev()), ('node', p.hex())]
955 for p in self._meaningful_parentrevs(ctx)]
955 for p in self._meaningful_parentrevs(ctx)]
956 return showlist('parent', parents, **args)
956 return showlist('parent', parents, **args)
957
957
958 props = props.copy()
958 props = props.copy()
959 props.update(templatekw.keywords)
959 props.update(templatekw.keywords)
960 props['parents'] = showparents
960 props['parents'] = showparents
961 props['templ'] = self.t
961 props['templ'] = self.t
962 props['ctx'] = ctx
962 props['ctx'] = ctx
963 props['repo'] = self.repo
963 props['repo'] = self.repo
964 props['revcache'] = {'copies': copies}
964 props['revcache'] = {'copies': copies}
965 props['cache'] = self.cache
965 props['cache'] = self.cache
966
966
967 # find correct templates for current mode
967 # find correct templates for current mode
968
968
969 tmplmodes = [
969 tmplmodes = [
970 (True, None),
970 (True, None),
971 (self.ui.verbose, 'verbose'),
971 (self.ui.verbose, 'verbose'),
972 (self.ui.quiet, 'quiet'),
972 (self.ui.quiet, 'quiet'),
973 (self.ui.debugflag, 'debug'),
973 (self.ui.debugflag, 'debug'),
974 ]
974 ]
975
975
976 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
976 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
977 for mode, postfix in tmplmodes:
977 for mode, postfix in tmplmodes:
978 for type in types:
978 for type in types:
979 cur = postfix and ('%s_%s' % (type, postfix)) or type
979 cur = postfix and ('%s_%s' % (type, postfix)) or type
980 if mode and cur in self.t:
980 if mode and cur in self.t:
981 types[type] = cur
981 types[type] = cur
982
982
983 try:
983 try:
984
984
985 # write header
985 # write header
986 if types['header']:
986 if types['header']:
987 h = templater.stringify(self.t(types['header'], **props))
987 h = templater.stringify(self.t(types['header'], **props))
988 if self.buffered:
988 if self.buffered:
989 self.header[ctx.rev()] = h
989 self.header[ctx.rev()] = h
990 else:
990 else:
991 if self.lastheader != h:
991 if self.lastheader != h:
992 self.lastheader = h
992 self.lastheader = h
993 self.ui.write(h)
993 self.ui.write(h)
994
994
995 # write changeset metadata, then patch if requested
995 # write changeset metadata, then patch if requested
996 key = types['changeset']
996 key = types['changeset']
997 self.ui.write(templater.stringify(self.t(key, **props)))
997 self.ui.write(templater.stringify(self.t(key, **props)))
998 self.showpatch(ctx.node(), matchfn)
998 self.showpatch(ctx.node(), matchfn)
999
999
1000 if types['footer']:
1000 if types['footer']:
1001 if not self.footer:
1001 if not self.footer:
1002 self.footer = templater.stringify(self.t(types['footer'],
1002 self.footer = templater.stringify(self.t(types['footer'],
1003 **props))
1003 **props))
1004
1004
1005 except KeyError, inst:
1005 except KeyError, inst:
1006 msg = _("%s: no key named '%s'")
1006 msg = _("%s: no key named '%s'")
1007 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1007 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1008 except SyntaxError, inst:
1008 except SyntaxError, inst:
1009 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1009 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1010
1010
1011 def show_changeset(ui, repo, opts, buffered=False):
1011 def show_changeset(ui, repo, opts, buffered=False):
1012 """show one changeset using template or regular display.
1012 """show one changeset using template or regular display.
1013
1013
1014 Display format will be the first non-empty hit of:
1014 Display format will be the first non-empty hit of:
1015 1. option 'template'
1015 1. option 'template'
1016 2. option 'style'
1016 2. option 'style'
1017 3. [ui] setting 'logtemplate'
1017 3. [ui] setting 'logtemplate'
1018 4. [ui] setting 'style'
1018 4. [ui] setting 'style'
1019 If all of these values are either the unset or the empty string,
1019 If all of these values are either the unset or the empty string,
1020 regular display via changeset_printer() is done.
1020 regular display via changeset_printer() is done.
1021 """
1021 """
1022 # options
1022 # options
1023 patch = False
1023 patch = False
1024 if opts.get('patch') or opts.get('stat'):
1024 if opts.get('patch') or opts.get('stat'):
1025 patch = matchall(repo)
1025 patch = matchall(repo)
1026
1026
1027 tmpl = opts.get('template')
1027 tmpl = opts.get('template')
1028 style = None
1028 style = None
1029 if tmpl:
1029 if tmpl:
1030 tmpl = templater.parsestring(tmpl, quoted=False)
1030 tmpl = templater.parsestring(tmpl, quoted=False)
1031 else:
1031 else:
1032 style = opts.get('style')
1032 style = opts.get('style')
1033
1033
1034 # ui settings
1034 # ui settings
1035 if not (tmpl or style):
1035 if not (tmpl or style):
1036 tmpl = ui.config('ui', 'logtemplate')
1036 tmpl = ui.config('ui', 'logtemplate')
1037 if tmpl:
1037 if tmpl:
1038 tmpl = templater.parsestring(tmpl)
1038 tmpl = templater.parsestring(tmpl)
1039 else:
1039 else:
1040 style = util.expandpath(ui.config('ui', 'style', ''))
1040 style = util.expandpath(ui.config('ui', 'style', ''))
1041
1041
1042 if not (tmpl or style):
1042 if not (tmpl or style):
1043 return changeset_printer(ui, repo, patch, opts, buffered)
1043 return changeset_printer(ui, repo, patch, opts, buffered)
1044
1044
1045 mapfile = None
1045 mapfile = None
1046 if style and not tmpl:
1046 if style and not tmpl:
1047 mapfile = style
1047 mapfile = style
1048 if not os.path.split(mapfile)[0]:
1048 if not os.path.split(mapfile)[0]:
1049 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1049 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1050 or templater.templatepath(mapfile))
1050 or templater.templatepath(mapfile))
1051 if mapname:
1051 if mapname:
1052 mapfile = mapname
1052 mapfile = mapname
1053
1053
1054 try:
1054 try:
1055 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
1055 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
1056 except SyntaxError, inst:
1056 except SyntaxError, inst:
1057 raise util.Abort(inst.args[0])
1057 raise util.Abort(inst.args[0])
1058 if tmpl:
1058 if tmpl:
1059 t.use_template(tmpl)
1059 t.use_template(tmpl)
1060 return t
1060 return t
1061
1061
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    matchesdate = util.matchdate(date)
    m = matchall(repo)
    hits = {}

    def prep(ctx, fns):
        # record the timestamp of every revision satisfying the date spec
        when = ctx.date()
        if matchesdate(when[0]):
            hits[ctx.rev()] = when

    # return the first matching revision the walk encounters
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in hits:
            ui.status(_("Found revision %s from %s\n") %
                      (rev, util.datestr(hits[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
1082
1082
1083 def walkchangerevs(repo, match, opts, prepare):
1083 def walkchangerevs(repo, match, opts, prepare):
1084 '''Iterate over files and the revs in which they changed.
1084 '''Iterate over files and the revs in which they changed.
1085
1085
1086 Callers most commonly need to iterate backwards over the history
1086 Callers most commonly need to iterate backwards over the history
1087 in which they are interested. Doing so has awful (quadratic-looking)
1087 in which they are interested. Doing so has awful (quadratic-looking)
1088 performance, so we use iterators in a "windowed" way.
1088 performance, so we use iterators in a "windowed" way.
1089
1089
1090 We walk a window of revisions in the desired order. Within the
1090 We walk a window of revisions in the desired order. Within the
1091 window, we first walk forwards to gather data, then in the desired
1091 window, we first walk forwards to gather data, then in the desired
1092 order (usually backwards) to display it.
1092 order (usually backwards) to display it.
1093
1093
1094 This function returns an iterator yielding contexts. Before
1094 This function returns an iterator yielding contexts. Before
1095 yielding each context, the iterator will first call the prepare
1095 yielding each context, the iterator will first call the prepare
1096 function on each context in the window in forward order.'''
1096 function on each context in the window in forward order.'''
1097
1097
1098 def increasing_windows(start, end, windowsize=8, sizelimit=512):
1098 def increasing_windows(start, end, windowsize=8, sizelimit=512):
1099 if start < end:
1099 if start < end:
1100 while start < end:
1100 while start < end:
1101 yield start, min(windowsize, end - start)
1101 yield start, min(windowsize, end - start)
1102 start += windowsize
1102 start += windowsize
1103 if windowsize < sizelimit:
1103 if windowsize < sizelimit:
1104 windowsize *= 2
1104 windowsize *= 2
1105 else:
1105 else:
1106 while start > end:
1106 while start > end:
1107 yield start, min(windowsize, start - end - 1)
1107 yield start, min(windowsize, start - end - 1)
1108 start -= windowsize
1108 start -= windowsize
1109 if windowsize < sizelimit:
1109 if windowsize < sizelimit:
1110 windowsize *= 2
1110 windowsize *= 2
1111
1111
1112 follow = opts.get('follow') or opts.get('follow_first')
1112 follow = opts.get('follow') or opts.get('follow_first')
1113
1113
1114 if not len(repo):
1114 if not len(repo):
1115 return []
1115 return []
1116
1116
1117 if follow:
1117 if follow:
1118 defrange = '%s:0' % repo['.'].rev()
1118 defrange = '%s:0' % repo['.'].rev()
1119 else:
1119 else:
1120 defrange = '-1:0'
1120 defrange = '-1:0'
1121 revs = revrange(repo, opts['rev'] or [defrange])
1121 revs = revrange(repo, opts['rev'] or [defrange])
1122 if not revs:
1122 if not revs:
1123 return []
1123 return []
1124 wanted = set()
1124 wanted = set()
1125 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1125 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1126 fncache = {}
1126 fncache = {}
1127 change = util.cachefunc(repo.changectx)
1127 change = util.cachefunc(repo.changectx)
1128
1128
1129 # First step is to fill wanted, the set of revisions that we want to yield.
1129 # First step is to fill wanted, the set of revisions that we want to yield.
1130 # When it does not induce extra cost, we also fill fncache for revisions in
1130 # When it does not induce extra cost, we also fill fncache for revisions in
1131 # wanted: a cache of filenames that were changed (ctx.files()) and that
1131 # wanted: a cache of filenames that were changed (ctx.files()) and that
1132 # match the file filtering conditions.
1132 # match the file filtering conditions.
1133
1133
1134 if not slowpath and not match.files():
1134 if not slowpath and not match.files():
1135 # No files, no patterns. Display all revs.
1135 # No files, no patterns. Display all revs.
1136 wanted = set(revs)
1136 wanted = set(revs)
1137 copies = []
1137 copies = []
1138
1138
1139 if not slowpath:
1139 if not slowpath:
1140 # We only have to read through the filelog to find wanted revisions
1140 # We only have to read through the filelog to find wanted revisions
1141
1141
1142 minrev, maxrev = min(revs), max(revs)
1142 minrev, maxrev = min(revs), max(revs)
1143 def filerevgen(filelog, last):
1143 def filerevgen(filelog, last):
1144 """
1144 """
1145 Only files, no patterns. Check the history of each file.
1145 Only files, no patterns. Check the history of each file.
1146
1146
1147 Examines filelog entries within minrev, maxrev linkrev range
1147 Examines filelog entries within minrev, maxrev linkrev range
1148 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1148 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1149 tuples in backwards order
1149 tuples in backwards order
1150 """
1150 """
1151 cl_count = len(repo)
1151 cl_count = len(repo)
1152 revs = []
1152 revs = []
1153 for j in xrange(0, last + 1):
1153 for j in xrange(0, last + 1):
1154 linkrev = filelog.linkrev(j)
1154 linkrev = filelog.linkrev(j)
1155 if linkrev < minrev:
1155 if linkrev < minrev:
1156 continue
1156 continue
1157 # only yield rev for which we have the changelog, it can
1157 # only yield rev for which we have the changelog, it can
1158 # happen while doing "hg log" during a pull or commit
1158 # happen while doing "hg log" during a pull or commit
1159 if linkrev >= cl_count:
1159 if linkrev >= cl_count:
1160 break
1160 break
1161
1161
1162 parentlinkrevs = []
1162 parentlinkrevs = []
1163 for p in filelog.parentrevs(j):
1163 for p in filelog.parentrevs(j):
1164 if p != nullrev:
1164 if p != nullrev:
1165 parentlinkrevs.append(filelog.linkrev(p))
1165 parentlinkrevs.append(filelog.linkrev(p))
1166 n = filelog.node(j)
1166 n = filelog.node(j)
1167 revs.append((linkrev, parentlinkrevs,
1167 revs.append((linkrev, parentlinkrevs,
1168 follow and filelog.renamed(n)))
1168 follow and filelog.renamed(n)))
1169
1169
1170 return reversed(revs)
1170 return reversed(revs)
1171 def iterfiles():
1171 def iterfiles():
1172 for filename in match.files():
1172 for filename in match.files():
1173 yield filename, None
1173 yield filename, None
1174 for filename_node in copies:
1174 for filename_node in copies:
1175 yield filename_node
1175 yield filename_node
1176 for file_, node in iterfiles():
1176 for file_, node in iterfiles():
1177 filelog = repo.file(file_)
1177 filelog = repo.file(file_)
1178 if not len(filelog):
1178 if not len(filelog):
1179 if node is None:
1179 if node is None:
1180 # A zero count may be a directory or deleted file, so
1180 # A zero count may be a directory or deleted file, so
1181 # try to find matching entries on the slow path.
1181 # try to find matching entries on the slow path.
1182 if follow:
1182 if follow:
1183 raise util.Abort(
1183 raise util.Abort(
1184 _('cannot follow nonexistent file: "%s"') % file_)
1184 _('cannot follow nonexistent file: "%s"') % file_)
1185 slowpath = True
1185 slowpath = True
1186 break
1186 break
1187 else:
1187 else:
1188 continue
1188 continue
1189
1189
1190 if node is None:
1190 if node is None:
1191 last = len(filelog) - 1
1191 last = len(filelog) - 1
1192 else:
1192 else:
1193 last = filelog.rev(node)
1193 last = filelog.rev(node)
1194
1194
1195
1195
1196 # keep track of all ancestors of the file
1196 # keep track of all ancestors of the file
1197 ancestors = set([filelog.linkrev(last)])
1197 ancestors = set([filelog.linkrev(last)])
1198
1198
1199 # iterate from latest to oldest revision
1199 # iterate from latest to oldest revision
1200 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1200 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1201 if not follow:
1201 if not follow:
1202 if rev > maxrev:
1202 if rev > maxrev:
1203 continue
1203 continue
1204 else:
1204 else:
1205 # Note that last might not be the first interesting
1205 # Note that last might not be the first interesting
1206 # rev to us:
1206 # rev to us:
1207 # if the file has been changed after maxrev, we'll
1207 # if the file has been changed after maxrev, we'll
1208 # have linkrev(last) > maxrev, and we still need
1208 # have linkrev(last) > maxrev, and we still need
1209 # to explore the file graph
1209 # to explore the file graph
1210 if rev not in ancestors:
1210 if rev not in ancestors:
1211 continue
1211 continue
1212 # XXX insert 1327 fix here
1212 # XXX insert 1327 fix here
1213 if flparentlinkrevs:
1213 if flparentlinkrevs:
1214 ancestors.update(flparentlinkrevs)
1214 ancestors.update(flparentlinkrevs)
1215
1215
1216 fncache.setdefault(rev, []).append(file_)
1216 fncache.setdefault(rev, []).append(file_)
1217 wanted.add(rev)
1217 wanted.add(rev)
1218 if copied:
1218 if copied:
1219 copies.append(copied)
1219 copies.append(copied)
1220 if slowpath:
1220 if slowpath:
1221 # We have to read the changelog to match filenames against
1221 # We have to read the changelog to match filenames against
1222 # changed files
1222 # changed files
1223
1223
1224 if follow:
1224 if follow:
1225 raise util.Abort(_('can only follow copies/renames for explicit '
1225 raise util.Abort(_('can only follow copies/renames for explicit '
1226 'filenames'))
1226 'filenames'))
1227
1227
1228 # The slow path checks files modified in every changeset.
1228 # The slow path checks files modified in every changeset.
1229 for i in sorted(revs):
1229 for i in sorted(revs):
1230 ctx = change(i)
1230 ctx = change(i)
1231 matches = filter(match, ctx.files())
1231 matches = filter(match, ctx.files())
1232 if matches:
1232 if matches:
1233 fncache[i] = matches
1233 fncache[i] = matches
1234 wanted.add(i)
1234 wanted.add(i)
1235
1235
1236 class followfilter(object):
1236 class followfilter(object):
1237 def __init__(self, onlyfirst=False):
1237 def __init__(self, onlyfirst=False):
1238 self.startrev = nullrev
1238 self.startrev = nullrev
1239 self.roots = set()
1239 self.roots = set()
1240 self.onlyfirst = onlyfirst
1240 self.onlyfirst = onlyfirst
1241
1241
1242 def match(self, rev):
1242 def match(self, rev):
1243 def realparents(rev):
1243 def realparents(rev):
1244 if self.onlyfirst:
1244 if self.onlyfirst:
1245 return repo.changelog.parentrevs(rev)[0:1]
1245 return repo.changelog.parentrevs(rev)[0:1]
1246 else:
1246 else:
1247 return filter(lambda x: x != nullrev,
1247 return filter(lambda x: x != nullrev,
1248 repo.changelog.parentrevs(rev))
1248 repo.changelog.parentrevs(rev))
1249
1249
1250 if self.startrev == nullrev:
1250 if self.startrev == nullrev:
1251 self.startrev = rev
1251 self.startrev = rev
1252 return True
1252 return True
1253
1253
1254 if rev > self.startrev:
1254 if rev > self.startrev:
1255 # forward: all descendants
1255 # forward: all descendants
1256 if not self.roots:
1256 if not self.roots:
1257 self.roots.add(self.startrev)
1257 self.roots.add(self.startrev)
1258 for parent in realparents(rev):
1258 for parent in realparents(rev):
1259 if parent in self.roots:
1259 if parent in self.roots:
1260 self.roots.add(rev)
1260 self.roots.add(rev)
1261 return True
1261 return True
1262 else:
1262 else:
1263 # backwards: all parents
1263 # backwards: all parents
1264 if not self.roots:
1264 if not self.roots:
1265 self.roots.update(realparents(self.startrev))
1265 self.roots.update(realparents(self.startrev))
1266 if rev in self.roots:
1266 if rev in self.roots:
1267 self.roots.remove(rev)
1267 self.roots.remove(rev)
1268 self.roots.update(realparents(rev))
1268 self.roots.update(realparents(rev))
1269 return True
1269 return True
1270
1270
1271 return False
1271 return False
1272
1272
1273 # it might be worthwhile to do this in the iterator if the rev range
1273 # it might be worthwhile to do this in the iterator if the rev range
1274 # is descending and the prune args are all within that range
1274 # is descending and the prune args are all within that range
1275 for rev in opts.get('prune', ()):
1275 for rev in opts.get('prune', ()):
1276 rev = repo.changelog.rev(repo.lookup(rev))
1276 rev = repo.changelog.rev(repo.lookup(rev))
1277 ff = followfilter()
1277 ff = followfilter()
1278 stop = min(revs[0], revs[-1])
1278 stop = min(revs[0], revs[-1])
1279 for x in xrange(rev, stop - 1, -1):
1279 for x in xrange(rev, stop - 1, -1):
1280 if ff.match(x):
1280 if ff.match(x):
1281 wanted.discard(x)
1281 wanted.discard(x)
1282
1282
1283 # Now that wanted is correctly initialized, we can iterate over the
1283 # Now that wanted is correctly initialized, we can iterate over the
1284 # revision range, yielding only revisions in wanted.
1284 # revision range, yielding only revisions in wanted.
1285 def iterate():
1285 def iterate():
1286 if follow and not match.files():
1286 if follow and not match.files():
1287 ff = followfilter(onlyfirst=opts.get('follow_first'))
1287 ff = followfilter(onlyfirst=opts.get('follow_first'))
1288 def want(rev):
1288 def want(rev):
1289 return ff.match(rev) and rev in wanted
1289 return ff.match(rev) and rev in wanted
1290 else:
1290 else:
1291 def want(rev):
1291 def want(rev):
1292 return rev in wanted
1292 return rev in wanted
1293
1293
1294 for i, window in increasing_windows(0, len(revs)):
1294 for i, window in increasing_windows(0, len(revs)):
1295 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1295 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1296 for rev in sorted(nrevs):
1296 for rev in sorted(nrevs):
1297 fns = fncache.get(rev)
1297 fns = fncache.get(rev)
1298 ctx = change(rev)
1298 ctx = change(rev)
1299 if not fns:
1299 if not fns:
1300 def fns_generator():
1300 def fns_generator():
1301 for f in ctx.files():
1301 for f in ctx.files():
1302 if match(f):
1302 if match(f):
1303 yield f
1303 yield f
1304 fns = fns_generator()
1304 fns = fns_generator()
1305 prepare(ctx, fns)
1305 prepare(ctx, fns)
1306 for rev in nrevs:
1306 for rev in nrevs:
1307 yield change(rev)
1307 yield change(rev)
1308 return iterate()
1308 return iterate()
1309
1309
1310 def add(ui, repo, match, dryrun, listsubrepos, prefix):
1310 def add(ui, repo, match, dryrun, listsubrepos, prefix):
1311 join = lambda f: os.path.join(prefix, f)
1311 join = lambda f: os.path.join(prefix, f)
1312 bad = []
1312 bad = []
1313 oldbad = match.bad
1313 oldbad = match.bad
1314 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1314 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1315 names = []
1315 names = []
1316 wctx = repo[None]
1316 wctx = repo[None]
1317 cca = None
1317 cca = None
1318 abort, warn = scmutil.checkportabilityalert(ui)
1318 abort, warn = scmutil.checkportabilityalert(ui)
1319 if abort or warn:
1319 if abort or warn:
1320 cca = scmutil.casecollisionauditor(ui, abort, wctx)
1320 cca = scmutil.casecollisionauditor(ui, abort, wctx)
1321 for f in repo.walk(match):
1321 for f in repo.walk(match):
1322 exact = match.exact(f)
1322 exact = match.exact(f)
1323 if exact or f not in repo.dirstate:
1323 if exact or f not in repo.dirstate:
1324 if cca:
1324 if cca:
1325 cca(f)
1325 cca(f)
1326 names.append(f)
1326 names.append(f)
1327 if ui.verbose or not exact:
1327 if ui.verbose or not exact:
1328 ui.status(_('adding %s\n') % match.rel(join(f)))
1328 ui.status(_('adding %s\n') % match.rel(join(f)))
1329
1329
1330 if listsubrepos:
1330 if listsubrepos:
1331 for subpath in wctx.substate:
1331 for subpath in wctx.substate:
1332 sub = wctx.sub(subpath)
1332 sub = wctx.sub(subpath)
1333 try:
1333 try:
1334 submatch = matchmod.narrowmatcher(subpath, match)
1334 submatch = matchmod.narrowmatcher(subpath, match)
1335 bad.extend(sub.add(ui, submatch, dryrun, prefix))
1335 bad.extend(sub.add(ui, submatch, dryrun, prefix))
1336 except error.LookupError:
1336 except error.LookupError:
1337 ui.status(_("skipping missing subrepository: %s\n")
1337 ui.status(_("skipping missing subrepository: %s\n")
1338 % join(subpath))
1338 % join(subpath))
1339
1339
1340 if not dryrun:
1340 if not dryrun:
1341 rejected = wctx.add(names, prefix)
1341 rejected = wctx.add(names, prefix)
1342 bad.extend(f for f in rejected if f in match.files())
1342 bad.extend(f for f in rejected if f in match.files())
1343 return bad
1343 return bad
1344
1344
1345 def commit(ui, repo, commitfunc, pats, opts):
1345 def commit(ui, repo, commitfunc, pats, opts):
1346 '''commit the specified files or all outstanding changes'''
1346 '''commit the specified files or all outstanding changes'''
1347 date = opts.get('date')
1347 date = opts.get('date')
1348 if date:
1348 if date:
1349 opts['date'] = util.parsedate(date)
1349 opts['date'] = util.parsedate(date)
1350 message = logmessage(opts)
1350 message = logmessage(opts)
1351
1351
1352 # extract addremove carefully -- this function can be called from a command
1352 # extract addremove carefully -- this function can be called from a command
1353 # that doesn't support addremove
1353 # that doesn't support addremove
1354 if opts.get('addremove'):
1354 if opts.get('addremove'):
1355 addremove(repo, pats, opts)
1355 addremove(repo, pats, opts)
1356
1356
1357 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1357 return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
1358
1358
1359 def commiteditor(repo, ctx, subs):
1359 def commiteditor(repo, ctx, subs):
1360 if ctx.description():
1360 if ctx.description():
1361 return ctx.description()
1361 return ctx.description()
1362 return commitforceeditor(repo, ctx, subs)
1362 return commitforceeditor(repo, ctx, subs)
1363
1363
1364 def commitforceeditor(repo, ctx, subs):
1364 def commitforceeditor(repo, ctx, subs):
1365 edittext = []
1365 edittext = []
1366 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1366 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1367 if ctx.description():
1367 if ctx.description():
1368 edittext.append(ctx.description())
1368 edittext.append(ctx.description())
1369 edittext.append("")
1369 edittext.append("")
1370 edittext.append("") # Empty line between message and comments.
1370 edittext.append("") # Empty line between message and comments.
1371 edittext.append(_("HG: Enter commit message."
1371 edittext.append(_("HG: Enter commit message."
1372 " Lines beginning with 'HG:' are removed."))
1372 " Lines beginning with 'HG:' are removed."))
1373 edittext.append(_("HG: Leave message empty to abort commit."))
1373 edittext.append(_("HG: Leave message empty to abort commit."))
1374 edittext.append("HG: --")
1374 edittext.append("HG: --")
1375 edittext.append(_("HG: user: %s") % ctx.user())
1375 edittext.append(_("HG: user: %s") % ctx.user())
1376 if ctx.p2():
1376 if ctx.p2():
1377 edittext.append(_("HG: branch merge"))
1377 edittext.append(_("HG: branch merge"))
1378 if ctx.branch():
1378 if ctx.branch():
1379 edittext.append(_("HG: branch '%s'") % ctx.branch())
1379 edittext.append(_("HG: branch '%s'") % ctx.branch())
1380 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1380 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1381 edittext.extend([_("HG: added %s") % f for f in added])
1381 edittext.extend([_("HG: added %s") % f for f in added])
1382 edittext.extend([_("HG: changed %s") % f for f in modified])
1382 edittext.extend([_("HG: changed %s") % f for f in modified])
1383 edittext.extend([_("HG: removed %s") % f for f in removed])
1383 edittext.extend([_("HG: removed %s") % f for f in removed])
1384 if not added and not modified and not removed:
1384 if not added and not modified and not removed:
1385 edittext.append(_("HG: no files changed"))
1385 edittext.append(_("HG: no files changed"))
1386 edittext.append("")
1386 edittext.append("")
1387 # run editor in the repository root
1387 # run editor in the repository root
1388 olddir = os.getcwd()
1388 olddir = os.getcwd()
1389 os.chdir(repo.root)
1389 os.chdir(repo.root)
1390 text = repo.ui.edit("\n".join(edittext), ctx.user())
1390 text = repo.ui.edit("\n".join(edittext), ctx.user())
1391 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1391 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1392 os.chdir(olddir)
1392 os.chdir(olddir)
1393
1393
1394 if not text.strip():
1394 if not text.strip():
1395 raise util.Abort(_("empty commit message"))
1395 raise util.Abort(_("empty commit message"))
1396
1396
1397 return text
1397 return text
@@ -1,1957 +1,1957 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error
13 import scmutil, util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 'known', 'getbundle'))
23 'known', 'getbundle'))
24 supportedformats = set(('revlogv1',))
24 supportedformats = set(('revlogv1',))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = scmutil.pathauditor(self.root, self._checknested)
33 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 self.opener = scmutil.opener(self.path)
34 self.opener = scmutil.opener(self.path)
35 self.wopener = scmutil.opener(self.root)
35 self.wopener = scmutil.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 util.makedir(self.path, notindexed=True)
49 util.makedir(self.path, notindexed=True)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener.append(
59 self.opener.append(
60 "00changelog.i",
60 "00changelog.i",
61 '\0\0\0\2' # represents revlogv2
61 '\0\0\0\2' # represents revlogv2
62 ' dummy changelog to prevent using the old repo layout'
62 ' dummy changelog to prevent using the old repo layout'
63 )
63 )
64 else:
64 else:
65 raise error.RepoError(_("repository %s not found") % path)
65 raise error.RepoError(_("repository %s not found") % path)
66 elif create:
66 elif create:
67 raise error.RepoError(_("repository %s already exists") % path)
67 raise error.RepoError(_("repository %s already exists") % path)
68 else:
68 else:
69 # find requirements
69 # find requirements
70 requirements = set()
70 requirements = set()
71 try:
71 try:
72 requirements = set(self.opener.read("requires").splitlines())
72 requirements = set(self.opener.read("requires").splitlines())
73 except IOError, inst:
73 except IOError, inst:
74 if inst.errno != errno.ENOENT:
74 if inst.errno != errno.ENOENT:
75 raise
75 raise
76 for r in requirements - self.supported:
76 for r in requirements - self.supported:
77 raise error.RequirementError(
77 raise error.RequirementError(
78 _("requirement '%s' not supported") % r)
78 _("requirement '%s' not supported") % r)
79
79
80 self.sharedpath = self.path
80 self.sharedpath = self.path
81 try:
81 try:
82 s = os.path.realpath(self.opener.read("sharedpath"))
82 s = os.path.realpath(self.opener.read("sharedpath"))
83 if not os.path.exists(s):
83 if not os.path.exists(s):
84 raise error.RepoError(
84 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
86 self.sharedpath = s
87 except IOError, inst:
87 except IOError, inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90
90
91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 self.spath = self.store.path
92 self.spath = self.store.path
93 self.sopener = self.store.opener
93 self.sopener = self.store.opener
94 self.sjoin = self.store.join
94 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
96 self._applyrequirements(requirements)
97 if create:
97 if create:
98 self._writerequirements()
98 self._writerequirements()
99
99
100 # These two define the set of tags for this repository. _tags
100 # These two define the set of tags for this repository. _tags
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # 'local'. (Global tags are defined by .hgtags across all
102 # 'local'. (Global tags are defined by .hgtags across all
103 # heads, and local tags are defined in .hg/localtags.) They
103 # heads, and local tags are defined in .hg/localtags.) They
104 # constitute the in-memory cache of tags.
104 # constitute the in-memory cache of tags.
105 self._tags = None
105 self._tags = None
106 self._tagtypes = None
106 self._tagtypes = None
107
107
108 self._branchcache = None
108 self._branchcache = None
109 self._branchcachetip = None
109 self._branchcachetip = None
110 self.nodetagscache = None
110 self.nodetagscache = None
111 self.filterpats = {}
111 self.filterpats = {}
112 self._datafilters = {}
112 self._datafilters = {}
113 self._transref = self._lockref = self._wlockref = None
113 self._transref = self._lockref = self._wlockref = None
114
114
115 def _applyrequirements(self, requirements):
115 def _applyrequirements(self, requirements):
116 self.requirements = requirements
116 self.requirements = requirements
117 self.sopener.options = {}
117 self.sopener.options = {}
118
118
119 def _writerequirements(self):
119 def _writerequirements(self):
120 reqfile = self.opener("requires", "w")
120 reqfile = self.opener("requires", "w")
121 for r in self.requirements:
121 for r in self.requirements:
122 reqfile.write("%s\n" % r)
122 reqfile.write("%s\n" % r)
123 reqfile.close()
123 reqfile.close()
124
124
125 def _checknested(self, path):
125 def _checknested(self, path):
126 """Determine if path is a legal nested repository."""
126 """Determine if path is a legal nested repository."""
127 if not path.startswith(self.root):
127 if not path.startswith(self.root):
128 return False
128 return False
129 subpath = path[len(self.root) + 1:]
129 subpath = path[len(self.root) + 1:]
130
130
131 # XXX: Checking against the current working copy is wrong in
131 # XXX: Checking against the current working copy is wrong in
132 # the sense that it can reject things like
132 # the sense that it can reject things like
133 #
133 #
134 # $ hg cat -r 10 sub/x.txt
134 # $ hg cat -r 10 sub/x.txt
135 #
135 #
136 # if sub/ is no longer a subrepository in the working copy
136 # if sub/ is no longer a subrepository in the working copy
137 # parent revision.
137 # parent revision.
138 #
138 #
139 # However, it can of course also allow things that would have
139 # However, it can of course also allow things that would have
140 # been rejected before, such as the above cat command if sub/
140 # been rejected before, such as the above cat command if sub/
141 # is a subrepository now, but was a normal directory before.
141 # is a subrepository now, but was a normal directory before.
142 # The old path auditor would have rejected by mistake since it
142 # The old path auditor would have rejected by mistake since it
143 # panics when it sees sub/.hg/.
143 # panics when it sees sub/.hg/.
144 #
144 #
145 # All in all, checking against the working copy seems sensible
145 # All in all, checking against the working copy seems sensible
146 # since we want to prevent access to nested repositories on
146 # since we want to prevent access to nested repositories on
147 # the filesystem *now*.
147 # the filesystem *now*.
148 ctx = self[None]
148 ctx = self[None]
149 parts = util.splitpath(subpath)
149 parts = util.splitpath(subpath)
150 while parts:
150 while parts:
151 prefix = os.sep.join(parts)
151 prefix = os.sep.join(parts)
152 if prefix in ctx.substate:
152 if prefix in ctx.substate:
153 if prefix == subpath:
153 if prefix == subpath:
154 return True
154 return True
155 else:
155 else:
156 sub = ctx.sub(prefix)
156 sub = ctx.sub(prefix)
157 return sub.checknested(subpath[len(prefix) + 1:])
157 return sub.checknested(subpath[len(prefix) + 1:])
158 else:
158 else:
159 parts.pop()
159 parts.pop()
160 return False
160 return False
161
161
162 @util.propertycache
162 @util.propertycache
163 def _bookmarks(self):
163 def _bookmarks(self):
164 return bookmarks.read(self)
164 return bookmarks.read(self)
165
165
166 @util.propertycache
166 @util.propertycache
167 def _bookmarkcurrent(self):
167 def _bookmarkcurrent(self):
168 return bookmarks.readcurrent(self)
168 return bookmarks.readcurrent(self)
169
169
170 @propertycache
170 @propertycache
171 def changelog(self):
171 def changelog(self):
172 c = changelog.changelog(self.sopener)
172 c = changelog.changelog(self.sopener)
173 if 'HG_PENDING' in os.environ:
173 if 'HG_PENDING' in os.environ:
174 p = os.environ['HG_PENDING']
174 p = os.environ['HG_PENDING']
175 if p.startswith(self.root):
175 if p.startswith(self.root):
176 c.readpending('00changelog.i.a')
176 c.readpending('00changelog.i.a')
177 self.sopener.options['defversion'] = c.version
177 self.sopener.options['defversion'] = c.version
178 return c
178 return c
179
179
180 @propertycache
180 @propertycache
181 def manifest(self):
181 def manifest(self):
182 return manifest.manifest(self.sopener)
182 return manifest.manifest(self.sopener)
183
183
184 @propertycache
184 @propertycache
185 def dirstate(self):
185 def dirstate(self):
186 warned = [0]
186 warned = [0]
187 def validate(node):
187 def validate(node):
188 try:
188 try:
189 self.changelog.rev(node)
189 self.changelog.rev(node)
190 return node
190 return node
191 except error.LookupError:
191 except error.LookupError:
192 if not warned[0]:
192 if not warned[0]:
193 warned[0] = True
193 warned[0] = True
194 self.ui.warn(_("warning: ignoring unknown"
194 self.ui.warn(_("warning: ignoring unknown"
195 " working parent %s!\n") % short(node))
195 " working parent %s!\n") % short(node))
196 return nullid
196 return nullid
197
197
198 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
198 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
199
199
200 def __getitem__(self, changeid):
200 def __getitem__(self, changeid):
201 if changeid is None:
201 if changeid is None:
202 return context.workingctx(self)
202 return context.workingctx(self)
203 return context.changectx(self, changeid)
203 return context.changectx(self, changeid)
204
204
205 def __contains__(self, changeid):
205 def __contains__(self, changeid):
206 try:
206 try:
207 return bool(self.lookup(changeid))
207 return bool(self.lookup(changeid))
208 except error.RepoLookupError:
208 except error.RepoLookupError:
209 return False
209 return False
210
210
211 def __nonzero__(self):
211 def __nonzero__(self):
212 return True
212 return True
213
213
214 def __len__(self):
214 def __len__(self):
215 return len(self.changelog)
215 return len(self.changelog)
216
216
217 def __iter__(self):
217 def __iter__(self):
218 for i in xrange(len(self)):
218 for i in xrange(len(self)):
219 yield i
219 yield i
220
220
221 def url(self):
221 def url(self):
222 return 'file:' + self.root
222 return 'file:' + self.root
223
223
224 def hook(self, name, throw=False, **args):
224 def hook(self, name, throw=False, **args):
225 return hook.hook(self.ui, self, name, throw, **args)
225 return hook.hook(self.ui, self, name, throw, **args)
226
226
227 tag_disallowed = ':\r\n'
227 tag_disallowed = ':\r\n'
228
228
229 def _tag(self, names, node, message, local, user, date, extra={}):
229 def _tag(self, names, node, message, local, user, date, extra={}):
230 if isinstance(names, str):
230 if isinstance(names, str):
231 allchars = names
231 allchars = names
232 names = (names,)
232 names = (names,)
233 else:
233 else:
234 allchars = ''.join(names)
234 allchars = ''.join(names)
235 for c in self.tag_disallowed:
235 for c in self.tag_disallowed:
236 if c in allchars:
236 if c in allchars:
237 raise util.Abort(_('%r cannot be used in a tag name') % c)
237 raise util.Abort(_('%r cannot be used in a tag name') % c)
238
238
239 branches = self.branchmap()
239 branches = self.branchmap()
240 for name in names:
240 for name in names:
241 self.hook('pretag', throw=True, node=hex(node), tag=name,
241 self.hook('pretag', throw=True, node=hex(node), tag=name,
242 local=local)
242 local=local)
243 if name in branches:
243 if name in branches:
244 self.ui.warn(_("warning: tag %s conflicts with existing"
244 self.ui.warn(_("warning: tag %s conflicts with existing"
245 " branch name\n") % name)
245 " branch name\n") % name)
246
246
247 def writetags(fp, names, munge, prevtags):
247 def writetags(fp, names, munge, prevtags):
248 fp.seek(0, 2)
248 fp.seek(0, 2)
249 if prevtags and prevtags[-1] != '\n':
249 if prevtags and prevtags[-1] != '\n':
250 fp.write('\n')
250 fp.write('\n')
251 for name in names:
251 for name in names:
252 m = munge and munge(name) or name
252 m = munge and munge(name) or name
253 if self._tagtypes and name in self._tagtypes:
253 if self._tagtypes and name in self._tagtypes:
254 old = self._tags.get(name, nullid)
254 old = self._tags.get(name, nullid)
255 fp.write('%s %s\n' % (hex(old), m))
255 fp.write('%s %s\n' % (hex(old), m))
256 fp.write('%s %s\n' % (hex(node), m))
256 fp.write('%s %s\n' % (hex(node), m))
257 fp.close()
257 fp.close()
258
258
259 prevtags = ''
259 prevtags = ''
260 if local:
260 if local:
261 try:
261 try:
262 fp = self.opener('localtags', 'r+')
262 fp = self.opener('localtags', 'r+')
263 except IOError:
263 except IOError:
264 fp = self.opener('localtags', 'a')
264 fp = self.opener('localtags', 'a')
265 else:
265 else:
266 prevtags = fp.read()
266 prevtags = fp.read()
267
267
268 # local tags are stored in the current charset
268 # local tags are stored in the current charset
269 writetags(fp, names, None, prevtags)
269 writetags(fp, names, None, prevtags)
270 for name in names:
270 for name in names:
271 self.hook('tag', node=hex(node), tag=name, local=local)
271 self.hook('tag', node=hex(node), tag=name, local=local)
272 return
272 return
273
273
274 try:
274 try:
275 fp = self.wfile('.hgtags', 'rb+')
275 fp = self.wfile('.hgtags', 'rb+')
276 except IOError:
276 except IOError:
277 fp = self.wfile('.hgtags', 'ab')
277 fp = self.wfile('.hgtags', 'ab')
278 else:
278 else:
279 prevtags = fp.read()
279 prevtags = fp.read()
280
280
281 # committed tags are stored in UTF-8
281 # committed tags are stored in UTF-8
282 writetags(fp, names, encoding.fromlocal, prevtags)
282 writetags(fp, names, encoding.fromlocal, prevtags)
283
283
284 fp.close()
284 fp.close()
285
285
286 if '.hgtags' not in self.dirstate:
286 if '.hgtags' not in self.dirstate:
287 self[None].add(['.hgtags'])
287 self[None].add(['.hgtags'])
288
288
289 m = matchmod.exact(self.root, '', ['.hgtags'])
289 m = matchmod.exact(self.root, '', ['.hgtags'])
290 tagnode = self.commit(message, user, date, extra=extra, match=m)
290 tagnode = self.commit(message, user, date, extra=extra, match=m)
291
291
292 for name in names:
292 for name in names:
293 self.hook('tag', node=hex(node), tag=name, local=local)
293 self.hook('tag', node=hex(node), tag=name, local=local)
294
294
295 return tagnode
295 return tagnode
296
296
297 def tag(self, names, node, message, local, user, date):
297 def tag(self, names, node, message, local, user, date):
298 '''tag a revision with one or more symbolic names.
298 '''tag a revision with one or more symbolic names.
299
299
300 names is a list of strings or, when adding a single tag, names may be a
300 names is a list of strings or, when adding a single tag, names may be a
301 string.
301 string.
302
302
303 if local is True, the tags are stored in a per-repository file.
303 if local is True, the tags are stored in a per-repository file.
304 otherwise, they are stored in the .hgtags file, and a new
304 otherwise, they are stored in the .hgtags file, and a new
305 changeset is committed with the change.
305 changeset is committed with the change.
306
306
307 keyword arguments:
307 keyword arguments:
308
308
309 local: whether to store tags in non-version-controlled file
309 local: whether to store tags in non-version-controlled file
310 (default False)
310 (default False)
311
311
312 message: commit message to use if committing
312 message: commit message to use if committing
313
313
314 user: name of user to use if committing
314 user: name of user to use if committing
315
315
316 date: date tuple to use if committing'''
316 date: date tuple to use if committing'''
317
317
318 if not local:
318 if not local:
319 for x in self.status()[:5]:
319 for x in self.status()[:5]:
320 if '.hgtags' in x:
320 if '.hgtags' in x:
321 raise util.Abort(_('working copy of .hgtags is changed '
321 raise util.Abort(_('working copy of .hgtags is changed '
322 '(please commit .hgtags manually)'))
322 '(please commit .hgtags manually)'))
323
323
324 self.tags() # instantiate the cache
324 self.tags() # instantiate the cache
325 self._tag(names, node, message, local, user, date)
325 self._tag(names, node, message, local, user, date)
326
326
327 def tags(self):
327 def tags(self):
328 '''return a mapping of tag to node'''
328 '''return a mapping of tag to node'''
329 if self._tags is None:
329 if self._tags is None:
330 (self._tags, self._tagtypes) = self._findtags()
330 (self._tags, self._tagtypes) = self._findtags()
331
331
332 return self._tags
332 return self._tags
333
333
334 def _findtags(self):
334 def _findtags(self):
335 '''Do the hard work of finding tags. Return a pair of dicts
335 '''Do the hard work of finding tags. Return a pair of dicts
336 (tags, tagtypes) where tags maps tag name to node, and tagtypes
336 (tags, tagtypes) where tags maps tag name to node, and tagtypes
337 maps tag name to a string like \'global\' or \'local\'.
337 maps tag name to a string like \'global\' or \'local\'.
338 Subclasses or extensions are free to add their own tags, but
338 Subclasses or extensions are free to add their own tags, but
339 should be aware that the returned dicts will be retained for the
339 should be aware that the returned dicts will be retained for the
340 duration of the localrepo object.'''
340 duration of the localrepo object.'''
341
341
342 # XXX what tagtype should subclasses/extensions use? Currently
342 # XXX what tagtype should subclasses/extensions use? Currently
343 # mq and bookmarks add tags, but do not set the tagtype at all.
343 # mq and bookmarks add tags, but do not set the tagtype at all.
344 # Should each extension invent its own tag type? Should there
344 # Should each extension invent its own tag type? Should there
345 # be one tagtype for all such "virtual" tags? Or is the status
345 # be one tagtype for all such "virtual" tags? Or is the status
346 # quo fine?
346 # quo fine?
347
347
348 alltags = {} # map tag name to (node, hist)
348 alltags = {} # map tag name to (node, hist)
349 tagtypes = {}
349 tagtypes = {}
350
350
351 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
351 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
352 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
352 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
353
353
354 # Build the return dicts. Have to re-encode tag names because
354 # Build the return dicts. Have to re-encode tag names because
355 # the tags module always uses UTF-8 (in order not to lose info
355 # the tags module always uses UTF-8 (in order not to lose info
356 # writing to the cache), but the rest of Mercurial wants them in
356 # writing to the cache), but the rest of Mercurial wants them in
357 # local encoding.
357 # local encoding.
358 tags = {}
358 tags = {}
359 for (name, (node, hist)) in alltags.iteritems():
359 for (name, (node, hist)) in alltags.iteritems():
360 if node != nullid:
360 if node != nullid:
361 try:
361 try:
362 # ignore tags to unknown nodes
362 # ignore tags to unknown nodes
363 self.changelog.lookup(node)
363 self.changelog.lookup(node)
364 tags[encoding.tolocal(name)] = node
364 tags[encoding.tolocal(name)] = node
365 except error.LookupError:
365 except error.LookupError:
366 pass
366 pass
367 tags['tip'] = self.changelog.tip()
367 tags['tip'] = self.changelog.tip()
368 tagtypes = dict([(encoding.tolocal(name), value)
368 tagtypes = dict([(encoding.tolocal(name), value)
369 for (name, value) in tagtypes.iteritems()])
369 for (name, value) in tagtypes.iteritems()])
370 return (tags, tagtypes)
370 return (tags, tagtypes)
371
371
372 def tagtype(self, tagname):
372 def tagtype(self, tagname):
373 '''
373 '''
374 return the type of the given tag. result can be:
374 return the type of the given tag. result can be:
375
375
376 'local' : a local tag
376 'local' : a local tag
377 'global' : a global tag
377 'global' : a global tag
378 None : tag does not exist
378 None : tag does not exist
379 '''
379 '''
380
380
381 self.tags()
381 self.tags()
382
382
383 return self._tagtypes.get(tagname)
383 return self._tagtypes.get(tagname)
384
384
385 def tagslist(self):
385 def tagslist(self):
386 '''return a list of tags ordered by revision'''
386 '''return a list of tags ordered by revision'''
387 l = []
387 l = []
388 for t, n in self.tags().iteritems():
388 for t, n in self.tags().iteritems():
389 r = self.changelog.rev(n)
389 r = self.changelog.rev(n)
390 l.append((r, t, n))
390 l.append((r, t, n))
391 return [(t, n) for r, t, n in sorted(l)]
391 return [(t, n) for r, t, n in sorted(l)]
392
392
393 def nodetags(self, node):
393 def nodetags(self, node):
394 '''return the tags associated with a node'''
394 '''return the tags associated with a node'''
395 if not self.nodetagscache:
395 if not self.nodetagscache:
396 self.nodetagscache = {}
396 self.nodetagscache = {}
397 for t, n in self.tags().iteritems():
397 for t, n in self.tags().iteritems():
398 self.nodetagscache.setdefault(n, []).append(t)
398 self.nodetagscache.setdefault(n, []).append(t)
399 for tags in self.nodetagscache.itervalues():
399 for tags in self.nodetagscache.itervalues():
400 tags.sort()
400 tags.sort()
401 return self.nodetagscache.get(node, [])
401 return self.nodetagscache.get(node, [])
402
402
403 def nodebookmarks(self, node):
403 def nodebookmarks(self, node):
404 marks = []
404 marks = []
405 for bookmark, n in self._bookmarks.iteritems():
405 for bookmark, n in self._bookmarks.iteritems():
406 if n == node:
406 if n == node:
407 marks.append(bookmark)
407 marks.append(bookmark)
408 return sorted(marks)
408 return sorted(marks)
409
409
410 def _branchtags(self, partial, lrev):
410 def _branchtags(self, partial, lrev):
411 # TODO: rename this function?
411 # TODO: rename this function?
412 tiprev = len(self) - 1
412 tiprev = len(self) - 1
413 if lrev != tiprev:
413 if lrev != tiprev:
414 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
414 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
415 self._updatebranchcache(partial, ctxgen)
415 self._updatebranchcache(partial, ctxgen)
416 self._writebranchcache(partial, self.changelog.tip(), tiprev)
416 self._writebranchcache(partial, self.changelog.tip(), tiprev)
417
417
418 return partial
418 return partial
419
419
420 def updatebranchcache(self):
420 def updatebranchcache(self):
421 tip = self.changelog.tip()
421 tip = self.changelog.tip()
422 if self._branchcache is not None and self._branchcachetip == tip:
422 if self._branchcache is not None and self._branchcachetip == tip:
423 return self._branchcache
423 return self._branchcache
424
424
425 oldtip = self._branchcachetip
425 oldtip = self._branchcachetip
426 self._branchcachetip = tip
426 self._branchcachetip = tip
427 if oldtip is None or oldtip not in self.changelog.nodemap:
427 if oldtip is None or oldtip not in self.changelog.nodemap:
428 partial, last, lrev = self._readbranchcache()
428 partial, last, lrev = self._readbranchcache()
429 else:
429 else:
430 lrev = self.changelog.rev(oldtip)
430 lrev = self.changelog.rev(oldtip)
431 partial = self._branchcache
431 partial = self._branchcache
432
432
433 self._branchtags(partial, lrev)
433 self._branchtags(partial, lrev)
434 # this private cache holds all heads (not just tips)
434 # this private cache holds all heads (not just tips)
435 self._branchcache = partial
435 self._branchcache = partial
436
436
437 def branchmap(self):
437 def branchmap(self):
438 '''returns a dictionary {branch: [branchheads]}'''
438 '''returns a dictionary {branch: [branchheads]}'''
439 self.updatebranchcache()
439 self.updatebranchcache()
440 return self._branchcache
440 return self._branchcache
441
441
442 def branchtags(self):
442 def branchtags(self):
443 '''return a dict where branch names map to the tipmost head of
443 '''return a dict where branch names map to the tipmost head of
444 the branch, open heads come before closed'''
444 the branch, open heads come before closed'''
445 bt = {}
445 bt = {}
446 for bn, heads in self.branchmap().iteritems():
446 for bn, heads in self.branchmap().iteritems():
447 tip = heads[-1]
447 tip = heads[-1]
448 for h in reversed(heads):
448 for h in reversed(heads):
449 if 'close' not in self.changelog.read(h)[5]:
449 if 'close' not in self.changelog.read(h)[5]:
450 tip = h
450 tip = h
451 break
451 break
452 bt[bn] = tip
452 bt[bn] = tip
453 return bt
453 return bt
454
454
455 def _readbranchcache(self):
455 def _readbranchcache(self):
456 partial = {}
456 partial = {}
457 try:
457 try:
458 f = self.opener("cache/branchheads")
458 f = self.opener("cache/branchheads")
459 lines = f.read().split('\n')
459 lines = f.read().split('\n')
460 f.close()
460 f.close()
461 except (IOError, OSError):
461 except (IOError, OSError):
462 return {}, nullid, nullrev
462 return {}, nullid, nullrev
463
463
464 try:
464 try:
465 last, lrev = lines.pop(0).split(" ", 1)
465 last, lrev = lines.pop(0).split(" ", 1)
466 last, lrev = bin(last), int(lrev)
466 last, lrev = bin(last), int(lrev)
467 if lrev >= len(self) or self[lrev].node() != last:
467 if lrev >= len(self) or self[lrev].node() != last:
468 # invalidate the cache
468 # invalidate the cache
469 raise ValueError('invalidating branch cache (tip differs)')
469 raise ValueError('invalidating branch cache (tip differs)')
470 for l in lines:
470 for l in lines:
471 if not l:
471 if not l:
472 continue
472 continue
473 node, label = l.split(" ", 1)
473 node, label = l.split(" ", 1)
474 label = encoding.tolocal(label.strip())
474 label = encoding.tolocal(label.strip())
475 partial.setdefault(label, []).append(bin(node))
475 partial.setdefault(label, []).append(bin(node))
476 except KeyboardInterrupt:
476 except KeyboardInterrupt:
477 raise
477 raise
478 except Exception, inst:
478 except Exception, inst:
479 if self.ui.debugflag:
479 if self.ui.debugflag:
480 self.ui.warn(str(inst), '\n')
480 self.ui.warn(str(inst), '\n')
481 partial, last, lrev = {}, nullid, nullrev
481 partial, last, lrev = {}, nullid, nullrev
482 return partial, last, lrev
482 return partial, last, lrev
483
483
484 def _writebranchcache(self, branches, tip, tiprev):
484 def _writebranchcache(self, branches, tip, tiprev):
485 try:
485 try:
486 f = self.opener("cache/branchheads", "w", atomictemp=True)
486 f = self.opener("cache/branchheads", "w", atomictemp=True)
487 f.write("%s %s\n" % (hex(tip), tiprev))
487 f.write("%s %s\n" % (hex(tip), tiprev))
488 for label, nodes in branches.iteritems():
488 for label, nodes in branches.iteritems():
489 for node in nodes:
489 for node in nodes:
490 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
490 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
491 f.rename()
491 f.rename()
492 except (IOError, OSError):
492 except (IOError, OSError):
493 pass
493 pass
494
494
495 def _updatebranchcache(self, partial, ctxgen):
495 def _updatebranchcache(self, partial, ctxgen):
496 # collect new branch entries
496 # collect new branch entries
497 newbranches = {}
497 newbranches = {}
498 for c in ctxgen:
498 for c in ctxgen:
499 newbranches.setdefault(c.branch(), []).append(c.node())
499 newbranches.setdefault(c.branch(), []).append(c.node())
500 # if older branchheads are reachable from new ones, they aren't
500 # if older branchheads are reachable from new ones, they aren't
501 # really branchheads. Note checking parents is insufficient:
501 # really branchheads. Note checking parents is insufficient:
502 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
502 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
503 for branch, newnodes in newbranches.iteritems():
503 for branch, newnodes in newbranches.iteritems():
504 bheads = partial.setdefault(branch, [])
504 bheads = partial.setdefault(branch, [])
505 bheads.extend(newnodes)
505 bheads.extend(newnodes)
506 if len(bheads) <= 1:
506 if len(bheads) <= 1:
507 continue
507 continue
508 bheads = sorted(bheads, key=lambda x: self[x].rev())
508 bheads = sorted(bheads, key=lambda x: self[x].rev())
509 # starting from tip means fewer passes over reachable
509 # starting from tip means fewer passes over reachable
510 while newnodes:
510 while newnodes:
511 latest = newnodes.pop()
511 latest = newnodes.pop()
512 if latest not in bheads:
512 if latest not in bheads:
513 continue
513 continue
514 minbhrev = self[bheads[0]].node()
514 minbhrev = self[bheads[0]].node()
515 reachable = self.changelog.reachable(latest, minbhrev)
515 reachable = self.changelog.reachable(latest, minbhrev)
516 reachable.remove(latest)
516 reachable.remove(latest)
517 if reachable:
517 if reachable:
518 bheads = [b for b in bheads if b not in reachable]
518 bheads = [b for b in bheads if b not in reachable]
519 partial[branch] = bheads
519 partial[branch] = bheads
520
520
521 def lookup(self, key):
521 def lookup(self, key):
522 if isinstance(key, int):
522 if isinstance(key, int):
523 return self.changelog.node(key)
523 return self.changelog.node(key)
524 elif key == '.':
524 elif key == '.':
525 return self.dirstate.p1()
525 return self.dirstate.p1()
526 elif key == 'null':
526 elif key == 'null':
527 return nullid
527 return nullid
528 elif key == 'tip':
528 elif key == 'tip':
529 return self.changelog.tip()
529 return self.changelog.tip()
530 n = self.changelog._match(key)
530 n = self.changelog._match(key)
531 if n:
531 if n:
532 return n
532 return n
533 if key in self._bookmarks:
533 if key in self._bookmarks:
534 return self._bookmarks[key]
534 return self._bookmarks[key]
535 if key in self.tags():
535 if key in self.tags():
536 return self.tags()[key]
536 return self.tags()[key]
537 if key in self.branchtags():
537 if key in self.branchtags():
538 return self.branchtags()[key]
538 return self.branchtags()[key]
539 n = self.changelog._partialmatch(key)
539 n = self.changelog._partialmatch(key)
540 if n:
540 if n:
541 return n
541 return n
542
542
543 # can't find key, check if it might have come from damaged dirstate
543 # can't find key, check if it might have come from damaged dirstate
544 if key in self.dirstate.parents():
544 if key in self.dirstate.parents():
545 raise error.Abort(_("working directory has unknown parent '%s'!")
545 raise error.Abort(_("working directory has unknown parent '%s'!")
546 % short(key))
546 % short(key))
547 try:
547 try:
548 if len(key) == 20:
548 if len(key) == 20:
549 key = hex(key)
549 key = hex(key)
550 except TypeError:
550 except TypeError:
551 pass
551 pass
552 raise error.RepoLookupError(_("unknown revision '%s'") % key)
552 raise error.RepoLookupError(_("unknown revision '%s'") % key)
553
553
554 def lookupbranch(self, key, remote=None):
554 def lookupbranch(self, key, remote=None):
555 repo = remote or self
555 repo = remote or self
556 if key in repo.branchmap():
556 if key in repo.branchmap():
557 return key
557 return key
558
558
559 repo = (remote and remote.local()) and remote or self
559 repo = (remote and remote.local()) and remote or self
560 return repo[key].branch()
560 return repo[key].branch()
561
561
562 def known(self, nodes):
562 def known(self, nodes):
563 nm = self.changelog.nodemap
563 nm = self.changelog.nodemap
564 return [(n in nm) for n in nodes]
564 return [(n in nm) for n in nodes]
565
565
566 def local(self):
566 def local(self):
567 return True
567 return True
568
568
569 def join(self, f):
569 def join(self, f):
570 return os.path.join(self.path, f)
570 return os.path.join(self.path, f)
571
571
572 def wjoin(self, f):
572 def wjoin(self, f):
573 return os.path.join(self.root, f)
573 return os.path.join(self.root, f)
574
574
575 def file(self, f):
575 def file(self, f):
576 if f[0] == '/':
576 if f[0] == '/':
577 f = f[1:]
577 f = f[1:]
578 return filelog.filelog(self.sopener, f)
578 return filelog.filelog(self.sopener, f)
579
579
580 def changectx(self, changeid):
580 def changectx(self, changeid):
581 return self[changeid]
581 return self[changeid]
582
582
583 def parents(self, changeid=None):
583 def parents(self, changeid=None):
584 '''get list of changectxs for parents of changeid'''
584 '''get list of changectxs for parents of changeid'''
585 return self[changeid].parents()
585 return self[changeid].parents()
586
586
587 def filectx(self, path, changeid=None, fileid=None):
587 def filectx(self, path, changeid=None, fileid=None):
588 """changeid can be a changeset revision, node, or tag.
588 """changeid can be a changeset revision, node, or tag.
589 fileid can be a file revision or node."""
589 fileid can be a file revision or node."""
590 return context.filectx(self, path, changeid, fileid)
590 return context.filectx(self, path, changeid, fileid)
591
591
592 def getcwd(self):
592 def getcwd(self):
593 return self.dirstate.getcwd()
593 return self.dirstate.getcwd()
594
594
595 def pathto(self, f, cwd=None):
595 def pathto(self, f, cwd=None):
596 return self.dirstate.pathto(f, cwd)
596 return self.dirstate.pathto(f, cwd)
597
597
598 def wfile(self, f, mode='r'):
598 def wfile(self, f, mode='r'):
599 return self.wopener(f, mode)
599 return self.wopener(f, mode)
600
600
601 def _link(self, f):
601 def _link(self, f):
602 return os.path.islink(self.wjoin(f))
602 return os.path.islink(self.wjoin(f))
603
603
604 def _loadfilter(self, filter):
604 def _loadfilter(self, filter):
605 if filter not in self.filterpats:
605 if filter not in self.filterpats:
606 l = []
606 l = []
607 for pat, cmd in self.ui.configitems(filter):
607 for pat, cmd in self.ui.configitems(filter):
608 if cmd == '!':
608 if cmd == '!':
609 continue
609 continue
610 mf = matchmod.match(self.root, '', [pat])
610 mf = matchmod.match(self.root, '', [pat])
611 fn = None
611 fn = None
612 params = cmd
612 params = cmd
613 for name, filterfn in self._datafilters.iteritems():
613 for name, filterfn in self._datafilters.iteritems():
614 if cmd.startswith(name):
614 if cmd.startswith(name):
615 fn = filterfn
615 fn = filterfn
616 params = cmd[len(name):].lstrip()
616 params = cmd[len(name):].lstrip()
617 break
617 break
618 if not fn:
618 if not fn:
619 fn = lambda s, c, **kwargs: util.filter(s, c)
619 fn = lambda s, c, **kwargs: util.filter(s, c)
620 # Wrap old filters not supporting keyword arguments
620 # Wrap old filters not supporting keyword arguments
621 if not inspect.getargspec(fn)[2]:
621 if not inspect.getargspec(fn)[2]:
622 oldfn = fn
622 oldfn = fn
623 fn = lambda s, c, **kwargs: oldfn(s, c)
623 fn = lambda s, c, **kwargs: oldfn(s, c)
624 l.append((mf, fn, params))
624 l.append((mf, fn, params))
625 self.filterpats[filter] = l
625 self.filterpats[filter] = l
626 return self.filterpats[filter]
626 return self.filterpats[filter]
627
627
628 def _filter(self, filterpats, filename, data):
628 def _filter(self, filterpats, filename, data):
629 for mf, fn, cmd in filterpats:
629 for mf, fn, cmd in filterpats:
630 if mf(filename):
630 if mf(filename):
631 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
631 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
632 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
632 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
633 break
633 break
634
634
635 return data
635 return data
636
636
637 @propertycache
637 @propertycache
638 def _encodefilterpats(self):
638 def _encodefilterpats(self):
639 return self._loadfilter('encode')
639 return self._loadfilter('encode')
640
640
641 @propertycache
641 @propertycache
642 def _decodefilterpats(self):
642 def _decodefilterpats(self):
643 return self._loadfilter('decode')
643 return self._loadfilter('decode')
644
644
645 def adddatafilter(self, name, filter):
645 def adddatafilter(self, name, filter):
646 self._datafilters[name] = filter
646 self._datafilters[name] = filter
647
647
648 def wread(self, filename):
648 def wread(self, filename):
649 if self._link(filename):
649 if self._link(filename):
650 data = os.readlink(self.wjoin(filename))
650 data = os.readlink(self.wjoin(filename))
651 else:
651 else:
652 data = self.wopener.read(filename)
652 data = self.wopener.read(filename)
653 return self._filter(self._encodefilterpats, filename, data)
653 return self._filter(self._encodefilterpats, filename, data)
654
654
655 def wwrite(self, filename, data, flags):
655 def wwrite(self, filename, data, flags):
656 data = self._filter(self._decodefilterpats, filename, data)
656 data = self._filter(self._decodefilterpats, filename, data)
657 if 'l' in flags:
657 if 'l' in flags:
658 self.wopener.symlink(data, filename)
658 self.wopener.symlink(data, filename)
659 else:
659 else:
660 self.wopener.write(filename, data)
660 self.wopener.write(filename, data)
661 if 'x' in flags:
661 if 'x' in flags:
662 util.set_flags(self.wjoin(filename), False, True)
662 util.setflags(self.wjoin(filename), False, True)
663
663
664 def wwritedata(self, filename, data):
664 def wwritedata(self, filename, data):
665 return self._filter(self._decodefilterpats, filename, data)
665 return self._filter(self._decodefilterpats, filename, data)
666
666
667 def transaction(self, desc):
667 def transaction(self, desc):
668 tr = self._transref and self._transref() or None
668 tr = self._transref and self._transref() or None
669 if tr and tr.running():
669 if tr and tr.running():
670 return tr.nest()
670 return tr.nest()
671
671
672 # abort here if the journal already exists
672 # abort here if the journal already exists
673 if os.path.exists(self.sjoin("journal")):
673 if os.path.exists(self.sjoin("journal")):
674 raise error.RepoError(
674 raise error.RepoError(
675 _("abandoned transaction found - run hg recover"))
675 _("abandoned transaction found - run hg recover"))
676
676
677 # save dirstate for rollback
677 # save dirstate for rollback
678 try:
678 try:
679 ds = self.opener.read("dirstate")
679 ds = self.opener.read("dirstate")
680 except IOError:
680 except IOError:
681 ds = ""
681 ds = ""
682 self.opener.write("journal.dirstate", ds)
682 self.opener.write("journal.dirstate", ds)
683 self.opener.write("journal.branch",
683 self.opener.write("journal.branch",
684 encoding.fromlocal(self.dirstate.branch()))
684 encoding.fromlocal(self.dirstate.branch()))
685 self.opener.write("journal.desc",
685 self.opener.write("journal.desc",
686 "%d\n%s\n" % (len(self), desc))
686 "%d\n%s\n" % (len(self), desc))
687
687
688 renames = [(self.sjoin("journal"), self.sjoin("undo")),
688 renames = [(self.sjoin("journal"), self.sjoin("undo")),
689 (self.join("journal.dirstate"), self.join("undo.dirstate")),
689 (self.join("journal.dirstate"), self.join("undo.dirstate")),
690 (self.join("journal.branch"), self.join("undo.branch")),
690 (self.join("journal.branch"), self.join("undo.branch")),
691 (self.join("journal.desc"), self.join("undo.desc"))]
691 (self.join("journal.desc"), self.join("undo.desc"))]
692 tr = transaction.transaction(self.ui.warn, self.sopener,
692 tr = transaction.transaction(self.ui.warn, self.sopener,
693 self.sjoin("journal"),
693 self.sjoin("journal"),
694 aftertrans(renames),
694 aftertrans(renames),
695 self.store.createmode)
695 self.store.createmode)
696 self._transref = weakref.ref(tr)
696 self._transref = weakref.ref(tr)
697 return tr
697 return tr
698
698
699 def recover(self):
699 def recover(self):
700 lock = self.lock()
700 lock = self.lock()
701 try:
701 try:
702 if os.path.exists(self.sjoin("journal")):
702 if os.path.exists(self.sjoin("journal")):
703 self.ui.status(_("rolling back interrupted transaction\n"))
703 self.ui.status(_("rolling back interrupted transaction\n"))
704 transaction.rollback(self.sopener, self.sjoin("journal"),
704 transaction.rollback(self.sopener, self.sjoin("journal"),
705 self.ui.warn)
705 self.ui.warn)
706 self.invalidate()
706 self.invalidate()
707 return True
707 return True
708 else:
708 else:
709 self.ui.warn(_("no interrupted transaction available\n"))
709 self.ui.warn(_("no interrupted transaction available\n"))
710 return False
710 return False
711 finally:
711 finally:
712 lock.release()
712 lock.release()
713
713
def rollback(self, dryrun=False):
    """Undo the most recently committed transaction, if any.

    Restores the store from the 'undo' journal and puts back the
    saved dirstate, bookmarks and branch.  When dryrun is True, only
    report what would be rolled back.  Returns 1 when no rollback
    information is available.
    """
    wlock = lock = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if os.path.exists(self.sjoin("undo")):
            # undo.desc records (one per line): new tip revision,
            # the command that created the undo data, its arguments
            try:
                args = self.opener.read("undo.desc").splitlines()
                if len(args) >= 3 and self.ui.verbose:
                    desc = _("repository tip rolled back to revision %s"
                             " (undo %s: %s)\n") % (
                             int(args[0]) - 1, args[1], args[2])
                elif len(args) >= 2:
                    desc = _("repository tip rolled back to revision %s"
                             " (undo %s)\n") % (
                             int(args[0]) - 1, args[1])
            except IOError:
                desc = _("rolling back unknown transaction\n")
            self.ui.status(desc)
            if dryrun:
                return
            transaction.rollback(self.sopener, self.sjoin("undo"),
                                 self.ui.warn)
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            if os.path.exists(self.join('undo.bookmarks')):
                util.rename(self.join('undo.bookmarks'),
                            self.join('bookmarks'))
            # the branch file may legitimately be absent; keep the
            # current branch and warn instead of failing
            try:
                branch = self.opener.read("undo.branch")
                self.dirstate.setbranch(branch)
            except IOError:
                self.ui.warn(_("named branch could not be reset, "
                               "current branch is still: %s\n")
                             % self.dirstate.branch())
            # drop every cache derived from the now-removed history
            self.invalidate()
            self.dirstate.invalidate()
            self.destroyed()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                self.ui.status(_("working directory now based on "
                                 "revisions %d and %d\n") % parents)
            else:
                self.ui.status(_("working directory now based on "
                                 "revision %d\n") % parents)
        else:
            self.ui.warn(_("no rollback information available\n"))
            return 1
    finally:
        release(lock, wlock)
763
763
def invalidatecaches(self):
    """Reset every lazily computed history cache to its unloaded state.

    The caches are rebuilt on demand the next time they are read.
    """
    # _branchcache maps branch names (in UTF-8) to heads
    for cachename in ('_tags', '_tagtypes', 'nodetagscache',
                      '_branchcache', '_branchcachetip'):
        setattr(self, cachename, None)
770
770
def invalidate(self):
    """Discard cached changelog/manifest/bookmark objects plus all
    derived caches, forcing a reload from disk on next access."""
    cachedattrs = ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent")
    instancedict = self.__dict__
    for name in cachedattrs:
        # only drop attributes that were actually materialized
        if name in instancedict:
            delattr(self, name)
    self.invalidatecaches()
776
776
def _lock(self, lockname, wait, releasefn, acquirefn, desc):
    """Acquire the lock file at lockname and return the lock object.

    lockname: path of the lock file to create
    wait: when True, retry with a timeout if the lock is already held
        by another process; when False, re-raise LockHeld immediately
    releasefn: callback run when the lock is released
    acquirefn: optional callback run once the lock has been acquired
    desc: human-readable description used in warning messages
    """
    try:
        # first attempt uses a zero timeout so a held lock fails fast
        l = lock.lock(lockname, 0, releasefn, desc=desc)
    except error.LockHeld, inst:
        if not wait:
            raise
        self.ui.warn(_("waiting for lock on %s held by %r\n") %
                     (desc, inst.locker))
        # default to 600 seconds timeout
        l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                      releasefn, desc=desc)
    if acquirefn:
        acquirefn()
    return l
791
791
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.)'''
    existing = self._lockref and self._lockref()
    if existing is not None and existing.held:
        # re-entrant acquisition: bump the count on the live lock
        existing.lock()
        return existing

    desc = _('repository %s') % self.origroot
    newlock = self._lock(self.sjoin("lock"), wait, self.store.write,
                         self.invalidate, desc)
    self._lockref = weakref.ref(newlock)
    return newlock
805
805
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.
    Use this before modifying files in .hg.'''
    existing = self._wlockref and self._wlockref()
    if existing is not None and existing.held:
        # re-entrant acquisition: bump the count on the live lock
        existing.lock()
        return existing

    desc = _('working directory of %s') % self.origroot
    newlock = self._lock(self.join("wlock"), wait, self.dirstate.write,
                         self.dirstate.invalidate, desc)
    self._wlockref = weakref.ref(newlock)
    return newlock
820
820
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx: filectx of the file being committed
    manifest1: manifest of the first parent
    manifest2: manifest of the second parent (empty when not merging)
    linkrev: changelog revision the new filelog entry will link to
    tr: the active transaction
    changelist: list the filename is appended to when the file's
        content or flags actually changed

    Returns the filelog node for the file: a newly added one when the
    content changed, otherwise the first parent's node.
    """

    fname = fctx.path()
    text = fctx.data()
    flog = self.file(fname)
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = fparent2o = manifest2.get(fname, nullid)

    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        # 0 --- 1 --- 3 rev1 changes file foo
        # \ / rev2 renames foo to bar and changes it
        # \- 2 -/ rev3 should have bar with all changes and
        # should record that bar descends from
        # bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3 rev4 reverts the content change from rev2
        # \ / merging rev3 and rev4 should use bar@rev2
        # \- 2 --- 4 as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # find source in nearest ancestor if we've lost track
        if not crev:
            self.ui.debug(" %s: searching for copy revision for %s\n" %
                          (fname, cfname))
            for ancestor in self[None].ancestors():
                if cfname in ancestor:
                    crev = ancestor[cfname].filenode()
                    break

        if crev:
            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            # nullid first parent tells readers to consult the copy data
            fparent1, fparent2 = nullid, newfparent
        else:
            self.ui.warn(_("warning: can't find ancestor for '%s' "
                           "copied from '%s'!\n") % (fname, cfname))

    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestor = flog.ancestor(fparent1, fparent2)
        if fparentancestor == fparent1:
            fparent1, fparent2 = fparent2, nullid
        elif fparentancestor == fparent2:
            fparent2 = nullid

    # is the file changed?
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

    # are just the flags changed during merge?
    if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    return fparent1
900
900
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra={}):
    """Add a new revision to current repository.

    Revision information is gathered from the working directory,
    match can be used to filter the committed files. If editor is
    supplied, it is called to get a commit message.

    Returns the new changeset node, or None when there is nothing to
    commit.  Raises util.Abort on invalid file arguments, a partial
    merge commit, unresolved merge conflicts, or dirty subrepos when
    ui.commitsubrepos is disabled.
    """

    def fail(f, msg):
        raise util.Abort('%s: %s' % (f, msg))

    if not match:
        match = matchmod.always(self.root, '')

    if not force:
        # record visited directories so explicit dir arguments can be
        # validated after status has run
        vdirs = []
        match.dir = vdirs.append
        match.bad = fail

    wlock = self.wlock()
    try:
        wctx = self[None]
        merge = len(wctx.parents()) > 1

        if (not force and merge and match and
            (match.files() or match.anypats())):
            raise util.Abort(_('cannot partially commit a merge '
                               '(do not specify files or patterns)'))

        # changes: (modified, added, removed, deleted, unknown,
        #           ignored, clean) -- see status()
        changes = self.status(match=match, clean=force)
        if force:
            changes[0].extend(changes[6]) # mq may commit unchanged files

        # check subrepos
        subs = []
        removedsubs = set()
        for p in wctx.parents():
            removedsubs.update(s for s in p.substate if match(s))
        for s in wctx.substate:
            removedsubs.discard(s)
            if match(s) and wctx.sub(s).dirty():
                subs.append(s)
        if (subs or removedsubs):
            if (not match('.hgsub') and
                '.hgsub' in (wctx.modified() + wctx.added())):
                raise util.Abort(_("can't commit subrepos without .hgsub"))
            if '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

        if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
            changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
            if changedsubs:
                raise util.Abort(_("uncommitted changes in subrepo %s")
                                 % changedsubs[0])

        # make sure all explicit patterns are matched
        if not force and match.files():
            matched = set(changes[0] + changes[1] + changes[2])

            for f in match.files():
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in changes[3]: # missing
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

        # nothing changed and not closing a branch head: no commit
        if (not force and not extra.get("close") and not merge
            and not (changes[0] or changes[1] or changes[2])
            and wctx.branch() == wctx.p1().branch()):
            return None

        ms = mergemod.mergestate(self)
        for f in changes[0]:
            if f in ms and ms[f] == 'u':
                raise util.Abort(_("unresolved merge conflicts "
                                   "(see hg help resolve)"))

        cctx = context.workingctx(self, text, user, date, extra, changes)
        if editor:
            cctx._text = editor(self, cctx, subs)
        edited = (text != cctx._text)

        # commit subs
        if subs or removedsubs:
            state = wctx.substate.copy()
            for s in sorted(subs):
                sub = wctx.sub(s)
                self.ui.status(_('committing subrepository %s\n') %
                               subrepo.subrelpath(sub))
                sr = sub.commit(cctx._text, user, date)
                state[s] = (state[s][0], sr)
            subrepo.writestate(self, state)

        # Save commit message in case this transaction gets rolled back
        # (e.g. by a pretxncommit hook). Leave the content alone on
        # the assumption that the user will use the same editor again.
        msgfile = self.opener('last-message.txt', 'wb')
        msgfile.write(cctx._text)
        msgfile.close()

        p1, p2 = self.dirstate.parents()
        hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
        try:
            self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
            ret = self.commitctx(cctx, True)
        except:
            # on any failure, point the user at the saved message
            if edited:
                msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                self.ui.write(
                    _('note: commit message saved in %s\n') % msgfn)
            raise

        # update bookmarks, dirstate and mergestate
        bookmarks.update(self, p1, ret)
        for f in changes[0] + changes[1]:
            self.dirstate.normal(f)
        for f in changes[2]:
            self.dirstate.forget(f)
        self.dirstate.setparents(ret)
        ms.reset()
    finally:
        wlock.release()

    self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
    return ret
1035
1035
def commitctx(self, ctx, error=False):
    """Add a new revision to current repository.
    Revision information is passed via the context argument.

    ctx: the context (e.g. workingctx) describing the revision
    error: when True, IOError while reading a file aborts the commit
        instead of treating the file as removed

    Returns the new changelog node.
    """

    tr = lock = None
    removed = list(ctx.removed())
    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    lock = self.lock()
    try:
        tr = self.transaction("commit")
        trp = weakref.proxy(tr)

        if ctx.files():
            m1 = p1.manifest().copy()
            m2 = p2.manifest()

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    # a missing file (ENOENT) is recorded as removed
                    # unless the caller asked for strict error handling
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))
            files = changed + removed
        else:
            # no file changes: reuse the first parent's manifest
            mn = p1.manifestnode()
            files = []

        # update changelog
        self.changelog.delayupdate()
        n = self.changelog.add(mn, files, ctx.description(),
                               trp, p1.node(), p2.node(),
                               user, ctx.date(), ctx.extra().copy())
        # lets hooks see the pending changelog before it is finalized
        p = lambda: self.changelog.writepending() and self.root or ""
        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2, pending=p)
        self.changelog.finalize(trp)
        tr.close()

        if self._branchcache:
            self.updatebranchcache()
        return n
    finally:
        if tr:
            tr.release()
        lock.release()
1109
1109
def destroyed(self):
    """Notify the repository that history has been destroyed.

    Called by strip and rollback as the single place for post-destroy
    bookkeeping.  It does not receive the destroyed nodes themselves,
    since rollback has no easy way to produce that list.
    """
    # Refreshing caches here guarantees that right after a
    # strip/rollback the cached tip matches the real tip (both rev
    # and node), which the tag cache relies on to detect that no
    # nodes were added or destroyed behind its back.
    #
    # This is suboptimal for qrefresh (strip head, refresh cache,
    # immediately add a new head), but is needed for instant tag
    # cache retrieval to stay correct.
    self.invalidatecaches()
1128
1128
def walk(self, match, node=None):
    """Walk the given changeset (or the working directory when node
    is None) recursively, yielding every file accepted by the match
    function."""
    ctx = self[node]
    return ctx.walk(match)
1136
1136
1137 def status(self, node1='.', node2=None, match=None,
1137 def status(self, node1='.', node2=None, match=None,
1138 ignored=False, clean=False, unknown=False,
1138 ignored=False, clean=False, unknown=False,
1139 listsubrepos=False):
1139 listsubrepos=False):
1140 """return status of files between two nodes or node and working directory
1140 """return status of files between two nodes or node and working directory
1141
1141
1142 If node1 is None, use the first dirstate parent instead.
1142 If node1 is None, use the first dirstate parent instead.
1143 If node2 is None, compare node1 with working directory.
1143 If node2 is None, compare node1 with working directory.
1144 """
1144 """
1145
1145
1146 def mfmatches(ctx):
1146 def mfmatches(ctx):
1147 mf = ctx.manifest().copy()
1147 mf = ctx.manifest().copy()
1148 for fn in mf.keys():
1148 for fn in mf.keys():
1149 if not match(fn):
1149 if not match(fn):
1150 del mf[fn]
1150 del mf[fn]
1151 return mf
1151 return mf
1152
1152
1153 if isinstance(node1, context.changectx):
1153 if isinstance(node1, context.changectx):
1154 ctx1 = node1
1154 ctx1 = node1
1155 else:
1155 else:
1156 ctx1 = self[node1]
1156 ctx1 = self[node1]
1157 if isinstance(node2, context.changectx):
1157 if isinstance(node2, context.changectx):
1158 ctx2 = node2
1158 ctx2 = node2
1159 else:
1159 else:
1160 ctx2 = self[node2]
1160 ctx2 = self[node2]
1161
1161
1162 working = ctx2.rev() is None
1162 working = ctx2.rev() is None
1163 parentworking = working and ctx1 == self['.']
1163 parentworking = working and ctx1 == self['.']
1164 match = match or matchmod.always(self.root, self.getcwd())
1164 match = match or matchmod.always(self.root, self.getcwd())
1165 listignored, listclean, listunknown = ignored, clean, unknown
1165 listignored, listclean, listunknown = ignored, clean, unknown
1166
1166
1167 # load earliest manifest first for caching reasons
1167 # load earliest manifest first for caching reasons
1168 if not working and ctx2.rev() < ctx1.rev():
1168 if not working and ctx2.rev() < ctx1.rev():
1169 ctx2.manifest()
1169 ctx2.manifest()
1170
1170
1171 if not parentworking:
1171 if not parentworking:
1172 def bad(f, msg):
1172 def bad(f, msg):
1173 if f not in ctx1:
1173 if f not in ctx1:
1174 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1174 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1175 match.bad = bad
1175 match.bad = bad
1176
1176
1177 if working: # we need to scan the working dir
1177 if working: # we need to scan the working dir
1178 subrepos = []
1178 subrepos = []
1179 if '.hgsub' in self.dirstate:
1179 if '.hgsub' in self.dirstate:
1180 subrepos = ctx1.substate.keys()
1180 subrepos = ctx1.substate.keys()
1181 s = self.dirstate.status(match, subrepos, listignored,
1181 s = self.dirstate.status(match, subrepos, listignored,
1182 listclean, listunknown)
1182 listclean, listunknown)
1183 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1183 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1184
1184
1185 # check for any possibly clean files
1185 # check for any possibly clean files
1186 if parentworking and cmp:
1186 if parentworking and cmp:
1187 fixup = []
1187 fixup = []
1188 # do a full compare of any files that might have changed
1188 # do a full compare of any files that might have changed
1189 for f in sorted(cmp):
1189 for f in sorted(cmp):
1190 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1190 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1191 or ctx1[f].cmp(ctx2[f])):
1191 or ctx1[f].cmp(ctx2[f])):
1192 modified.append(f)
1192 modified.append(f)
1193 else:
1193 else:
1194 fixup.append(f)
1194 fixup.append(f)
1195
1195
1196 # update dirstate for files that are actually clean
1196 # update dirstate for files that are actually clean
1197 if fixup:
1197 if fixup:
1198 if listclean:
1198 if listclean:
1199 clean += fixup
1199 clean += fixup
1200
1200
1201 try:
1201 try:
1202 # updating the dirstate is optional
1202 # updating the dirstate is optional
1203 # so we don't wait on the lock
1203 # so we don't wait on the lock
1204 wlock = self.wlock(False)
1204 wlock = self.wlock(False)
1205 try:
1205 try:
1206 for f in fixup:
1206 for f in fixup:
1207 self.dirstate.normal(f)
1207 self.dirstate.normal(f)
1208 finally:
1208 finally:
1209 wlock.release()
1209 wlock.release()
1210 except error.LockError:
1210 except error.LockError:
1211 pass
1211 pass
1212
1212
1213 if not parentworking:
1213 if not parentworking:
1214 mf1 = mfmatches(ctx1)
1214 mf1 = mfmatches(ctx1)
1215 if working:
1215 if working:
1216 # we are comparing working dir against non-parent
1216 # we are comparing working dir against non-parent
1217 # generate a pseudo-manifest for the working dir
1217 # generate a pseudo-manifest for the working dir
1218 mf2 = mfmatches(self['.'])
1218 mf2 = mfmatches(self['.'])
1219 for f in cmp + modified + added:
1219 for f in cmp + modified + added:
1220 mf2[f] = None
1220 mf2[f] = None
1221 mf2.set(f, ctx2.flags(f))
1221 mf2.set(f, ctx2.flags(f))
1222 for f in removed:
1222 for f in removed:
1223 if f in mf2:
1223 if f in mf2:
1224 del mf2[f]
1224 del mf2[f]
1225 else:
1225 else:
1226 # we are comparing two revisions
1226 # we are comparing two revisions
1227 deleted, unknown, ignored = [], [], []
1227 deleted, unknown, ignored = [], [], []
1228 mf2 = mfmatches(ctx2)
1228 mf2 = mfmatches(ctx2)
1229
1229
1230 modified, added, clean = [], [], []
1230 modified, added, clean = [], [], []
1231 for fn in mf2:
1231 for fn in mf2:
1232 if fn in mf1:
1232 if fn in mf1:
1233 if (fn not in deleted and
1233 if (fn not in deleted and
1234 (mf1.flags(fn) != mf2.flags(fn) or
1234 (mf1.flags(fn) != mf2.flags(fn) or
1235 (mf1[fn] != mf2[fn] and
1235 (mf1[fn] != mf2[fn] and
1236 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1236 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1237 modified.append(fn)
1237 modified.append(fn)
1238 elif listclean:
1238 elif listclean:
1239 clean.append(fn)
1239 clean.append(fn)
1240 del mf1[fn]
1240 del mf1[fn]
1241 elif fn not in deleted:
1241 elif fn not in deleted:
1242 added.append(fn)
1242 added.append(fn)
1243 removed = mf1.keys()
1243 removed = mf1.keys()
1244
1244
1245 r = modified, added, removed, deleted, unknown, ignored, clean
1245 r = modified, added, removed, deleted, unknown, ignored, clean
1246
1246
1247 if listsubrepos:
1247 if listsubrepos:
1248 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1248 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1249 if working:
1249 if working:
1250 rev2 = None
1250 rev2 = None
1251 else:
1251 else:
1252 rev2 = ctx2.substate[subpath][1]
1252 rev2 = ctx2.substate[subpath][1]
1253 try:
1253 try:
1254 submatch = matchmod.narrowmatcher(subpath, match)
1254 submatch = matchmod.narrowmatcher(subpath, match)
1255 s = sub.status(rev2, match=submatch, ignored=listignored,
1255 s = sub.status(rev2, match=submatch, ignored=listignored,
1256 clean=listclean, unknown=listunknown,
1256 clean=listclean, unknown=listunknown,
1257 listsubrepos=True)
1257 listsubrepos=True)
1258 for rfiles, sfiles in zip(r, s):
1258 for rfiles, sfiles in zip(r, s):
1259 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1259 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1260 except error.LookupError:
1260 except error.LookupError:
1261 self.ui.status(_("skipping missing subrepository: %s\n")
1261 self.ui.status(_("skipping missing subrepository: %s\n")
1262 % subpath)
1262 % subpath)
1263
1263
1264 for l in r:
1264 for l in r:
1265 l.sort()
1265 l.sort()
1266 return r
1266 return r
1267
1267
def heads(self, start=None):
    """Return the repository's head nodes, newest first.

    If start is not None, only heads reachable from start are
    returned (the filtering is done by changelog.heads itself).
    """
    heads = self.changelog.heads(start)
    # the changelog returns heads in storage order; callers expect
    # them sorted by revision number, descending (newest first)
    return sorted(heads, key=self.changelog.rev, reverse=True)
1272
1272
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        branch = self[None].branch()
    branches = self.branchmap()
    if branch not in branches:
        return []
    # the cache returns heads ordered lowest to highest
    bheads = list(reversed(branches[branch]))
    if start is not None:
        # filter out the heads that cannot be reached from startrev
        fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
        bheads = [h for h in bheads if h in fbheads]
    if not closed:
        # extras (field 5 of the changelog entry) carry a 'close'
        # marker on branch-closing changesets
        bheads = [h for h in bheads if
                  ('close' not in self.changelog.read(h)[5])]
    return bheads
1296
1296
def branches(self, nodes):
    """Describe the linear branch segment ending at each given node.

    For every node, walk backwards along first parents until a merge
    (two real parents) or a root changeset is reached. Returns a list
    of (tip, bottom, parent0, parent1) tuples, one per input node.
    If nodes is empty, the changelog tip is used.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    b = []
    for n in nodes:
        t = n
        while True:
            p = self.changelog.parents(n)
            # stop at a merge (second parent set) or at a root
            # (first parent is null)
            if p[1] != nullid or p[0] == nullid:
                b.append((t, n, p[0], p[1]))
                break
            n = p[0]
    return b
1310
1310
def between(self, pairs):
    """Sample nodes along first-parent chains for the wire protocol.

    For each (top, bottom) pair, walk from top towards bottom along
    first parents and collect the nodes found at exponentially growing
    distances (1, 2, 4, ...). Returns a list of node lists, one per
    pair; each walk stops at bottom or at the null revision.
    """
    r = []

    for top, bottom in pairs:
        n, l, i = top, [], 0
        f = 1  # next sampling distance (doubles after each hit)

        while n != bottom and n != nullid:
            p = self.changelog.parents(n)[0]
            if i == f:
                l.append(n)
                f = f * 2
            n = p
            i += 1

        r.append(l)

    return r
1329
1329
def pull(self, remote, heads=None, force=False):
    """Pull changes from remote into this repository.

    heads, if given, limits the pull to changesets reachable from
    those heads; force skips the related-repository check in
    discovery. Returns the integer result of addchangegroup()
    (0 when there was nothing to fetch).
    """
    lock = self.lock()
    try:
        tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                           force=force)
        common, fetch, rheads = tmp
        if not fetch:
            self.ui.status(_("no changes found\n"))
            result = 0
        else:
            if heads is None and list(common) == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if remote.capable('getbundle'):
                cg = remote.getbundle('pull', common=common,
                                      heads=heads or rheads)
            elif heads is None:
                cg = remote.changegroup(fetch, 'pull')
            elif not remote.capable('changegroupsubset'):
                raise util.Abort(_("partial pull cannot be done because "
                                   "other repository doesn't support "
                                   "changegroupsubset."))
            else:
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            result = self.addchangegroup(cg, 'pull', remote.url(),
                                         lock=lock)
    finally:
        lock.release()

    return result
1363
1363
def checkpush(self, force, revs):
    """Extensions can override this function if additional checks have
    to be performed before pushing, or call it if they override push
    command.

    The base implementation is deliberately a no-op.
    """
    pass
1370
1370
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from the current
    repository to remote. Return an integer:
      - 0 means HTTP error *or* nothing to push
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    self.checkpush(force, revs)
    lock = None
    unbundle = remote.capable('unbundle')
    if not unbundle:
        lock = remote.lock()
    try:
        cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                             newbranch)
        ret = remote_heads
        if cg is not None:
            if unbundle:
                # local repo finds heads on server, finds out what
                # revs it must push. once revs transferred, if server
                # finds it has different heads (someone else won
                # commit/push race), server aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                ret = remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                ret = remote.addchangegroup(cg, 'push', self.url(),
                                            lock=lock)
    finally:
        if lock is not None:
            lock.release()

    # after a successful push, propagate local bookmarks that
    # fast-forward the remote's copies of the same bookmarks
    self.ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    for k in rb.keys():
        if k in self._bookmarks:
            nr, nl = rb[k], hex(self._bookmarks[k])
            if nr in self:
                cr = self[nr]
                cl = self[nl]
                # only push the bookmark if the move is a fast-forward
                # (local target descends from the remote one)
                if cl in cr.descendants():
                    r = remote.pushkey('bookmarks', k, nr, nl)
                    if r:
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_('updating bookmark %s'
                                       ' failed!\n') % k)

    return ret
1432
1432
def changegroupinfo(self, nodes, source):
    """Report how many changesets are about to be bundled.

    In verbose mode (or when bundling to a file) print the count;
    with --debug also list every changeset hash.
    """
    if self.ui.verbose or source == 'bundle':
        self.ui.status(_("%d changesets found\n") % len(nodes))
    if self.ui.debugflag:
        self.ui.debug("list of changesets:\n")
        for node in nodes:
            self.ui.debug("%s\n" % hex(node))
1440
1440
def changegroupsubset(self, bases, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendents of any of the bases and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = self.changelog
    if not bases:
        bases = [nullid]
    csets, bases, heads = cl.nodesbetween(bases, heads)
    # We assume that all ancestors of bases are known
    common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
    return self._changegroupsubset(common, csets, heads, source)
1461
1461
def getbundle(self, source, heads=None, common=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    cl = self.changelog
    if common:
        # drop nodes the local changelog does not know about
        nm = cl.nodemap
        common = [n for n in common if n in nm]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    common, missing = cl.findcommonmissing(common, heads)
    if not missing:
        # nothing to bundle
        return None
    return self._changegroupsubset(common, missing, heads, source)
1483
1483
def _changegroupsubset(self, commonrevs, csets, heads, source):
    """Build a changegroup for csets, excluding revisions in commonrevs.

    Returns an unbundle10 object streaming changelog, manifest and
    filelog chunks. Falls back to the simpler _changegroup() when the
    requested heads are exactly the repository heads.
    """
    cl = self.changelog
    mf = self.manifest
    mfs = {}        # needed manifests
    fnodes = {}     # needed file nodes
    changedfiles = set()
    fstate = ['', {}]   # (current filename, its node->clnode map)
    count = [0]         # mutable progress counter shared with lookup()

    # can we go through the fast path ?
    heads.sort()
    if heads == sorted(self.heads()):
        return self._changegroup(csets, source)

    # slow path
    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(csets, source)

    # filter any nodes that claim to be part of the known set
    def prune(revlog, missing):
        for n in missing:
            if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                yield n

    def lookup(revlog, x):
        # map a node of the given revlog back to its owning changeset,
        # collecting the manifests/filenodes needed along the way
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
            return x
        elif revlog == mf:
            clnode = mfs[x]
            mdata = mf.readfast(x)
            for f in changedfiles:
                if f in mdata:
                    fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('manifests'), total=len(mfs))
            return mfs[x]
        else:
            self.ui.progress(
                _('bundling'), count[0], item=fstate[0],
                unit=_('files'), total=len(changedfiles))
            return fstate[1][x]

    bundler = changegroup.bundle10(lookup)

    def gengroup():
        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        for chunk in cl.group(csets, bundler):
            yield chunk
        self.ui.progress(_('bundling'), None)

        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        count[0] = 0
        for chunk in mf.group(prune(mf, mfs), bundler):
            yield chunk
        self.ui.progress(_('bundling'), None)

        mfs.clear()

        # Go through all our files in order sorted by name.
        count[0] = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            fstate[0] = fname
            fstate[1] = fnodes.pop(fname, {})
            first = True

            for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                          bundler):
                if first:
                    # an immediately-closed group means this file has
                    # nothing to send; emit no header for it
                    if chunk == bundler.close():
                        break
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    first = False
                yield chunk
        # Signal that no more groups are left.
        yield bundler.close()
        self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1578
1578
def changegroup(self, basenodes, source):
    """Return a changegroup of everything newer than basenodes.

    Delegates to changegroupsubset() with the current heads
    to avoid a race (issue1320).
    """
    return self.changegroupsubset(basenodes, self.heads(), source)
1582
1582
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a recipient
    doesn't. Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    nodes is the set of nodes to send
    """
    cl = self.changelog
    mf = self.manifest
    mfs = {}             # changeset node for each needed manifest
    changedfiles = set()
    fstate = ['']        # current filename, for progress reporting
    count = [0]          # mutable progress counter shared with lookup()

    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(nodes, source)

    revset = set([cl.rev(n) for n in nodes])

    def gennodelst(log):
        # yield nodes of this revlog linked to an outgoing changeset
        for r in log:
            if log.linkrev(r) in revset:
                yield log.node(r)

    def lookup(revlog, x):
        # map a node of the given revlog back to its owning changeset
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
            return x
        elif revlog == mf:
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('manifests'), total=len(mfs))
            return cl.node(revlog.linkrev(revlog.rev(x)))
        else:
            self.ui.progress(
                _('bundling'), count[0], item=fstate[0],
                total=len(changedfiles), unit=_('files'))
            return cl.node(revlog.linkrev(revlog.rev(x)))

    bundler = changegroup.bundle10(lookup)

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # construct a list of all changed files

        for chunk in cl.group(nodes, bundler):
            yield chunk
        self.ui.progress(_('bundling'), None)

        count[0] = 0
        for chunk in mf.group(gennodelst(mf), bundler):
            yield chunk
        self.ui.progress(_('bundling'), None)

        count[0] = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            fstate[0] = fname
            first = True
            for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                if first:
                    # an immediately-closed group means this file has
                    # nothing to send; emit no header for it
                    if chunk == bundler.close():
                        break
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    first = False
                yield chunk
        yield bundler.close()
        self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1666
1666
1667 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1667 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1668 """Add the changegroup returned by source.read() to this repo.
1668 """Add the changegroup returned by source.read() to this repo.
1669 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1669 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1670 the URL of the repo where this changegroup is coming from.
1670 the URL of the repo where this changegroup is coming from.
1671 If lock is not None, the function takes ownership of the lock
1671 If lock is not None, the function takes ownership of the lock
1672 and releases it after the changegroup is added.
1672 and releases it after the changegroup is added.
1673
1673
1674 Return an integer summarizing the change to this repo:
1674 Return an integer summarizing the change to this repo:
1675 - nothing changed or no source: 0
1675 - nothing changed or no source: 0
1676 - more heads than before: 1+added heads (2..n)
1676 - more heads than before: 1+added heads (2..n)
1677 - fewer heads than before: -1-removed heads (-2..-n)
1677 - fewer heads than before: -1-removed heads (-2..-n)
1678 - number of heads stays the same: 1
1678 - number of heads stays the same: 1
1679 """
1679 """
1680 def csmap(x):
1680 def csmap(x):
1681 self.ui.debug("add changeset %s\n" % short(x))
1681 self.ui.debug("add changeset %s\n" % short(x))
1682 return len(cl)
1682 return len(cl)
1683
1683
1684 def revmap(x):
1684 def revmap(x):
1685 return cl.rev(x)
1685 return cl.rev(x)
1686
1686
1687 if not source:
1687 if not source:
1688 return 0
1688 return 0
1689
1689
1690 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1690 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1691
1691
1692 changesets = files = revisions = 0
1692 changesets = files = revisions = 0
1693 efiles = set()
1693 efiles = set()
1694
1694
1695 # write changelog data to temp files so concurrent readers will not see
1695 # write changelog data to temp files so concurrent readers will not see
1696 # inconsistent view
1696 # inconsistent view
1697 cl = self.changelog
1697 cl = self.changelog
1698 cl.delayupdate()
1698 cl.delayupdate()
1699 oldheads = cl.heads()
1699 oldheads = cl.heads()
1700
1700
1701 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1701 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1702 try:
1702 try:
1703 trp = weakref.proxy(tr)
1703 trp = weakref.proxy(tr)
1704 # pull off the changeset group
1704 # pull off the changeset group
1705 self.ui.status(_("adding changesets\n"))
1705 self.ui.status(_("adding changesets\n"))
1706 clstart = len(cl)
1706 clstart = len(cl)
1707 class prog(object):
1707 class prog(object):
1708 step = _('changesets')
1708 step = _('changesets')
1709 count = 1
1709 count = 1
1710 ui = self.ui
1710 ui = self.ui
1711 total = None
1711 total = None
1712 def __call__(self):
1712 def __call__(self):
1713 self.ui.progress(self.step, self.count, unit=_('chunks'),
1713 self.ui.progress(self.step, self.count, unit=_('chunks'),
1714 total=self.total)
1714 total=self.total)
1715 self.count += 1
1715 self.count += 1
1716 pr = prog()
1716 pr = prog()
1717 source.callback = pr
1717 source.callback = pr
1718
1718
1719 source.changelogheader()
1719 source.changelogheader()
1720 if (cl.addgroup(source, csmap, trp) is None
1720 if (cl.addgroup(source, csmap, trp) is None
1721 and not emptyok):
1721 and not emptyok):
1722 raise util.Abort(_("received changelog group is empty"))
1722 raise util.Abort(_("received changelog group is empty"))
1723 clend = len(cl)
1723 clend = len(cl)
1724 changesets = clend - clstart
1724 changesets = clend - clstart
1725 for c in xrange(clstart, clend):
1725 for c in xrange(clstart, clend):
1726 efiles.update(self[c].files())
1726 efiles.update(self[c].files())
1727 efiles = len(efiles)
1727 efiles = len(efiles)
1728 self.ui.progress(_('changesets'), None)
1728 self.ui.progress(_('changesets'), None)
1729
1729
1730 # pull off the manifest group
1730 # pull off the manifest group
1731 self.ui.status(_("adding manifests\n"))
1731 self.ui.status(_("adding manifests\n"))
1732 pr.step = _('manifests')
1732 pr.step = _('manifests')
1733 pr.count = 1
1733 pr.count = 1
1734 pr.total = changesets # manifests <= changesets
1734 pr.total = changesets # manifests <= changesets
1735 # no need to check for empty manifest group here:
1735 # no need to check for empty manifest group here:
1736 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1736 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1737 # no new manifest will be created and the manifest group will
1737 # no new manifest will be created and the manifest group will
1738 # be empty during the pull
1738 # be empty during the pull
1739 source.manifestheader()
1739 source.manifestheader()
1740 self.manifest.addgroup(source, revmap, trp)
1740 self.manifest.addgroup(source, revmap, trp)
1741 self.ui.progress(_('manifests'), None)
1741 self.ui.progress(_('manifests'), None)
1742
1742
1743 needfiles = {}
1743 needfiles = {}
1744 if self.ui.configbool('server', 'validate', default=False):
1744 if self.ui.configbool('server', 'validate', default=False):
1745 # validate incoming csets have their manifests
1745 # validate incoming csets have their manifests
1746 for cset in xrange(clstart, clend):
1746 for cset in xrange(clstart, clend):
1747 mfest = self.changelog.read(self.changelog.node(cset))[0]
1747 mfest = self.changelog.read(self.changelog.node(cset))[0]
1748 mfest = self.manifest.readdelta(mfest)
1748 mfest = self.manifest.readdelta(mfest)
1749 # store file nodes we must see
1749 # store file nodes we must see
1750 for f, n in mfest.iteritems():
1750 for f, n in mfest.iteritems():
1751 needfiles.setdefault(f, set()).add(n)
1751 needfiles.setdefault(f, set()).add(n)
1752
1752
1753 # process the files
1753 # process the files
1754 self.ui.status(_("adding file changes\n"))
1754 self.ui.status(_("adding file changes\n"))
1755 pr.step = 'files'
1755 pr.step = 'files'
1756 pr.count = 1
1756 pr.count = 1
1757 pr.total = efiles
1757 pr.total = efiles
1758 source.callback = None
1758 source.callback = None
1759
1759
1760 while 1:
1760 while 1:
1761 chunkdata = source.filelogheader()
1761 chunkdata = source.filelogheader()
1762 if not chunkdata:
1762 if not chunkdata:
1763 break
1763 break
1764 f = chunkdata["filename"]
1764 f = chunkdata["filename"]
1765 self.ui.debug("adding %s revisions\n" % f)
1765 self.ui.debug("adding %s revisions\n" % f)
1766 pr()
1766 pr()
1767 fl = self.file(f)
1767 fl = self.file(f)
1768 o = len(fl)
1768 o = len(fl)
1769 if fl.addgroup(source, revmap, trp) is None:
1769 if fl.addgroup(source, revmap, trp) is None:
1770 raise util.Abort(_("received file revlog group is empty"))
1770 raise util.Abort(_("received file revlog group is empty"))
1771 revisions += len(fl) - o
1771 revisions += len(fl) - o
1772 files += 1
1772 files += 1
1773 if f in needfiles:
1773 if f in needfiles:
1774 needs = needfiles[f]
1774 needs = needfiles[f]
1775 for new in xrange(o, len(fl)):
1775 for new in xrange(o, len(fl)):
1776 n = fl.node(new)
1776 n = fl.node(new)
1777 if n in needs:
1777 if n in needs:
1778 needs.remove(n)
1778 needs.remove(n)
1779 if not needs:
1779 if not needs:
1780 del needfiles[f]
1780 del needfiles[f]
1781 self.ui.progress(_('files'), None)
1781 self.ui.progress(_('files'), None)
1782
1782
1783 for f, needs in needfiles.iteritems():
1783 for f, needs in needfiles.iteritems():
1784 fl = self.file(f)
1784 fl = self.file(f)
1785 for n in needs:
1785 for n in needs:
1786 try:
1786 try:
1787 fl.rev(n)
1787 fl.rev(n)
1788 except error.LookupError:
1788 except error.LookupError:
1789 raise util.Abort(
1789 raise util.Abort(
1790 _('missing file data for %s:%s - run hg verify') %
1790 _('missing file data for %s:%s - run hg verify') %
1791 (f, hex(n)))
1791 (f, hex(n)))
1792
1792
1793 dh = 0
1793 dh = 0
1794 if oldheads:
1794 if oldheads:
1795 heads = cl.heads()
1795 heads = cl.heads()
1796 dh = len(heads) - len(oldheads)
1796 dh = len(heads) - len(oldheads)
1797 for h in heads:
1797 for h in heads:
1798 if h not in oldheads and 'close' in self[h].extra():
1798 if h not in oldheads and 'close' in self[h].extra():
1799 dh -= 1
1799 dh -= 1
1800 htext = ""
1800 htext = ""
1801 if dh:
1801 if dh:
1802 htext = _(" (%+d heads)") % dh
1802 htext = _(" (%+d heads)") % dh
1803
1803
1804 self.ui.status(_("added %d changesets"
1804 self.ui.status(_("added %d changesets"
1805 " with %d changes to %d files%s\n")
1805 " with %d changes to %d files%s\n")
1806 % (changesets, revisions, files, htext))
1806 % (changesets, revisions, files, htext))
1807
1807
1808 if changesets > 0:
1808 if changesets > 0:
1809 p = lambda: cl.writepending() and self.root or ""
1809 p = lambda: cl.writepending() and self.root or ""
1810 self.hook('pretxnchangegroup', throw=True,
1810 self.hook('pretxnchangegroup', throw=True,
1811 node=hex(cl.node(clstart)), source=srctype,
1811 node=hex(cl.node(clstart)), source=srctype,
1812 url=url, pending=p)
1812 url=url, pending=p)
1813
1813
1814 # make changelog see real files again
1814 # make changelog see real files again
1815 cl.finalize(trp)
1815 cl.finalize(trp)
1816
1816
1817 tr.close()
1817 tr.close()
1818 finally:
1818 finally:
1819 tr.release()
1819 tr.release()
1820 if lock:
1820 if lock:
1821 lock.release()
1821 lock.release()
1822
1822
1823 if changesets > 0:
1823 if changesets > 0:
1824 # forcefully update the on-disk branch cache
1824 # forcefully update the on-disk branch cache
1825 self.ui.debug("updating the branch cache\n")
1825 self.ui.debug("updating the branch cache\n")
1826 self.updatebranchcache()
1826 self.updatebranchcache()
1827 self.hook("changegroup", node=hex(cl.node(clstart)),
1827 self.hook("changegroup", node=hex(cl.node(clstart)),
1828 source=srctype, url=url)
1828 source=srctype, url=url)
1829
1829
1830 for i in xrange(clstart, clend):
1830 for i in xrange(clstart, clend):
1831 self.hook("incoming", node=hex(cl.node(i)),
1831 self.hook("incoming", node=hex(cl.node(i)),
1832 source=srctype, url=url)
1832 source=srctype, url=url)
1833
1833
1834 # never return 0 here:
1834 # never return 0 here:
1835 if dh < 0:
1835 if dh < 0:
1836 return dh - 1
1836 return dh - 1
1837 else:
1837 else:
1838 return dh + 1
1838 return dh + 1
1839
1839
1840 def stream_in(self, remote, requirements):
1840 def stream_in(self, remote, requirements):
1841 lock = self.lock()
1841 lock = self.lock()
1842 try:
1842 try:
1843 fp = remote.stream_out()
1843 fp = remote.stream_out()
1844 l = fp.readline()
1844 l = fp.readline()
1845 try:
1845 try:
1846 resp = int(l)
1846 resp = int(l)
1847 except ValueError:
1847 except ValueError:
1848 raise error.ResponseError(
1848 raise error.ResponseError(
1849 _('Unexpected response from remote server:'), l)
1849 _('Unexpected response from remote server:'), l)
1850 if resp == 1:
1850 if resp == 1:
1851 raise util.Abort(_('operation forbidden by server'))
1851 raise util.Abort(_('operation forbidden by server'))
1852 elif resp == 2:
1852 elif resp == 2:
1853 raise util.Abort(_('locking the remote repository failed'))
1853 raise util.Abort(_('locking the remote repository failed'))
1854 elif resp != 0:
1854 elif resp != 0:
1855 raise util.Abort(_('the server sent an unknown error code'))
1855 raise util.Abort(_('the server sent an unknown error code'))
1856 self.ui.status(_('streaming all changes\n'))
1856 self.ui.status(_('streaming all changes\n'))
1857 l = fp.readline()
1857 l = fp.readline()
1858 try:
1858 try:
1859 total_files, total_bytes = map(int, l.split(' ', 1))
1859 total_files, total_bytes = map(int, l.split(' ', 1))
1860 except (ValueError, TypeError):
1860 except (ValueError, TypeError):
1861 raise error.ResponseError(
1861 raise error.ResponseError(
1862 _('Unexpected response from remote server:'), l)
1862 _('Unexpected response from remote server:'), l)
1863 self.ui.status(_('%d files to transfer, %s of data\n') %
1863 self.ui.status(_('%d files to transfer, %s of data\n') %
1864 (total_files, util.bytecount(total_bytes)))
1864 (total_files, util.bytecount(total_bytes)))
1865 start = time.time()
1865 start = time.time()
1866 for i in xrange(total_files):
1866 for i in xrange(total_files):
1867 # XXX doesn't support '\n' or '\r' in filenames
1867 # XXX doesn't support '\n' or '\r' in filenames
1868 l = fp.readline()
1868 l = fp.readline()
1869 try:
1869 try:
1870 name, size = l.split('\0', 1)
1870 name, size = l.split('\0', 1)
1871 size = int(size)
1871 size = int(size)
1872 except (ValueError, TypeError):
1872 except (ValueError, TypeError):
1873 raise error.ResponseError(
1873 raise error.ResponseError(
1874 _('Unexpected response from remote server:'), l)
1874 _('Unexpected response from remote server:'), l)
1875 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1875 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1876 # for backwards compat, name was partially encoded
1876 # for backwards compat, name was partially encoded
1877 ofp = self.sopener(store.decodedir(name), 'w')
1877 ofp = self.sopener(store.decodedir(name), 'w')
1878 for chunk in util.filechunkiter(fp, limit=size):
1878 for chunk in util.filechunkiter(fp, limit=size):
1879 ofp.write(chunk)
1879 ofp.write(chunk)
1880 ofp.close()
1880 ofp.close()
1881 elapsed = time.time() - start
1881 elapsed = time.time() - start
1882 if elapsed <= 0:
1882 if elapsed <= 0:
1883 elapsed = 0.001
1883 elapsed = 0.001
1884 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1884 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1885 (util.bytecount(total_bytes), elapsed,
1885 (util.bytecount(total_bytes), elapsed,
1886 util.bytecount(total_bytes / elapsed)))
1886 util.bytecount(total_bytes / elapsed)))
1887
1887
1888 # new requirements = old non-format requirements + new format-related
1888 # new requirements = old non-format requirements + new format-related
1889 # requirements from the streamed-in repository
1889 # requirements from the streamed-in repository
1890 requirements.update(set(self.requirements) - self.supportedformats)
1890 requirements.update(set(self.requirements) - self.supportedformats)
1891 self._applyrequirements(requirements)
1891 self._applyrequirements(requirements)
1892 self._writerequirements()
1892 self._writerequirements()
1893
1893
1894 self.invalidate()
1894 self.invalidate()
1895 return len(self.heads()) + 1
1895 return len(self.heads()) + 1
1896 finally:
1896 finally:
1897 lock.release()
1897 lock.release()
1898
1898
1899 def clone(self, remote, heads=[], stream=False):
1899 def clone(self, remote, heads=[], stream=False):
1900 '''clone remote repository.
1900 '''clone remote repository.
1901
1901
1902 keyword arguments:
1902 keyword arguments:
1903 heads: list of revs to clone (forces use of pull)
1903 heads: list of revs to clone (forces use of pull)
1904 stream: use streaming clone if possible'''
1904 stream: use streaming clone if possible'''
1905
1905
1906 # now, all clients that can request uncompressed clones can
1906 # now, all clients that can request uncompressed clones can
1907 # read repo formats supported by all servers that can serve
1907 # read repo formats supported by all servers that can serve
1908 # them.
1908 # them.
1909
1909
1910 # if revlog format changes, client will have to check version
1910 # if revlog format changes, client will have to check version
1911 # and format flags on "stream" capability, and use
1911 # and format flags on "stream" capability, and use
1912 # uncompressed only if compatible.
1912 # uncompressed only if compatible.
1913
1913
1914 if stream and not heads:
1914 if stream and not heads:
1915 # 'stream' means remote revlog format is revlogv1 only
1915 # 'stream' means remote revlog format is revlogv1 only
1916 if remote.capable('stream'):
1916 if remote.capable('stream'):
1917 return self.stream_in(remote, set(('revlogv1',)))
1917 return self.stream_in(remote, set(('revlogv1',)))
1918 # otherwise, 'streamreqs' contains the remote revlog format
1918 # otherwise, 'streamreqs' contains the remote revlog format
1919 streamreqs = remote.capable('streamreqs')
1919 streamreqs = remote.capable('streamreqs')
1920 if streamreqs:
1920 if streamreqs:
1921 streamreqs = set(streamreqs.split(','))
1921 streamreqs = set(streamreqs.split(','))
1922 # if we support it, stream in and adjust our requirements
1922 # if we support it, stream in and adjust our requirements
1923 if not streamreqs - self.supportedformats:
1923 if not streamreqs - self.supportedformats:
1924 return self.stream_in(remote, streamreqs)
1924 return self.stream_in(remote, streamreqs)
1925 return self.pull(remote, heads)
1925 return self.pull(remote, heads)
1926
1926
1927 def pushkey(self, namespace, key, old, new):
1927 def pushkey(self, namespace, key, old, new):
1928 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1928 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1929 old=old, new=new)
1929 old=old, new=new)
1930 ret = pushkey.push(self, namespace, key, old, new)
1930 ret = pushkey.push(self, namespace, key, old, new)
1931 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1931 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1932 ret=ret)
1932 ret=ret)
1933 return ret
1933 return ret
1934
1934
1935 def listkeys(self, namespace):
1935 def listkeys(self, namespace):
1936 self.hook('prelistkeys', throw=True, namespace=namespace)
1936 self.hook('prelistkeys', throw=True, namespace=namespace)
1937 values = pushkey.list(self, namespace)
1937 values = pushkey.list(self, namespace)
1938 self.hook('listkeys', namespace=namespace, values=values)
1938 self.hook('listkeys', namespace=namespace, values=values)
1939 return values
1939 return values
1940
1940
1941 def debugwireargs(self, one, two, three=None, four=None, five=None):
1941 def debugwireargs(self, one, two, three=None, four=None, five=None):
1942 '''used to test argument passing over the wire'''
1942 '''used to test argument passing over the wire'''
1943 return "%s %s %s %s %s" % (one, two, three, four, five)
1943 return "%s %s %s %s %s" % (one, two, three, four, five)
1944
1944
1945 # used to avoid circular references so destructors work
1945 # used to avoid circular references so destructors work
1946 def aftertrans(files):
1946 def aftertrans(files):
1947 renamefiles = [tuple(t) for t in files]
1947 renamefiles = [tuple(t) for t in files]
1948 def a():
1948 def a():
1949 for src, dest in renamefiles:
1949 for src, dest in renamefiles:
1950 util.rename(src, dest)
1950 util.rename(src, dest)
1951 return a
1951 return a
1952
1952
1953 def instance(ui, path, create):
1953 def instance(ui, path, create):
1954 return localrepository(ui, util.localpath(path), create)
1954 return localrepository(ui, util.localpath(path), create)
1955
1955
1956 def islocal(path):
1956 def islocal(path):
1957 return True
1957 return True
@@ -1,561 +1,561 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, hex, bin
8 from node import nullid, nullrev, hex, bin
9 from i18n import _
9 from i18n import _
10 import scmutil, util, filemerge, copies, subrepo
10 import scmutil, util, filemerge, copies, subrepo
11 import errno, os, shutil
11 import errno, os, shutil
12
12
13 class mergestate(object):
13 class mergestate(object):
14 '''track 3-way merge state of individual files'''
14 '''track 3-way merge state of individual files'''
15 def __init__(self, repo):
15 def __init__(self, repo):
16 self._repo = repo
16 self._repo = repo
17 self._dirty = False
17 self._dirty = False
18 self._read()
18 self._read()
19 def reset(self, node=None):
19 def reset(self, node=None):
20 self._state = {}
20 self._state = {}
21 if node:
21 if node:
22 self._local = node
22 self._local = node
23 shutil.rmtree(self._repo.join("merge"), True)
23 shutil.rmtree(self._repo.join("merge"), True)
24 self._dirty = False
24 self._dirty = False
25 def _read(self):
25 def _read(self):
26 self._state = {}
26 self._state = {}
27 try:
27 try:
28 f = self._repo.opener("merge/state")
28 f = self._repo.opener("merge/state")
29 for i, l in enumerate(f):
29 for i, l in enumerate(f):
30 if i == 0:
30 if i == 0:
31 self._local = bin(l[:-1])
31 self._local = bin(l[:-1])
32 else:
32 else:
33 bits = l[:-1].split("\0")
33 bits = l[:-1].split("\0")
34 self._state[bits[0]] = bits[1:]
34 self._state[bits[0]] = bits[1:]
35 f.close()
35 f.close()
36 except IOError, err:
36 except IOError, err:
37 if err.errno != errno.ENOENT:
37 if err.errno != errno.ENOENT:
38 raise
38 raise
39 self._dirty = False
39 self._dirty = False
40 def commit(self):
40 def commit(self):
41 if self._dirty:
41 if self._dirty:
42 f = self._repo.opener("merge/state", "w")
42 f = self._repo.opener("merge/state", "w")
43 f.write(hex(self._local) + "\n")
43 f.write(hex(self._local) + "\n")
44 for d, v in self._state.iteritems():
44 for d, v in self._state.iteritems():
45 f.write("\0".join([d] + v) + "\n")
45 f.write("\0".join([d] + v) + "\n")
46 f.close()
46 f.close()
47 self._dirty = False
47 self._dirty = False
48 def add(self, fcl, fco, fca, fd, flags):
48 def add(self, fcl, fco, fca, fd, flags):
49 hash = util.sha1(fcl.path()).hexdigest()
49 hash = util.sha1(fcl.path()).hexdigest()
50 self._repo.opener.write("merge/" + hash, fcl.data())
50 self._repo.opener.write("merge/" + hash, fcl.data())
51 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
51 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
52 hex(fca.filenode()), fco.path(), flags]
52 hex(fca.filenode()), fco.path(), flags]
53 self._dirty = True
53 self._dirty = True
54 def __contains__(self, dfile):
54 def __contains__(self, dfile):
55 return dfile in self._state
55 return dfile in self._state
56 def __getitem__(self, dfile):
56 def __getitem__(self, dfile):
57 return self._state[dfile][0]
57 return self._state[dfile][0]
58 def __iter__(self):
58 def __iter__(self):
59 l = self._state.keys()
59 l = self._state.keys()
60 l.sort()
60 l.sort()
61 for f in l:
61 for f in l:
62 yield f
62 yield f
63 def mark(self, dfile, state):
63 def mark(self, dfile, state):
64 self._state[dfile][0] = state
64 self._state[dfile][0] = state
65 self._dirty = True
65 self._dirty = True
66 def resolve(self, dfile, wctx, octx):
66 def resolve(self, dfile, wctx, octx):
67 if self[dfile] == 'r':
67 if self[dfile] == 'r':
68 return 0
68 return 0
69 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
69 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
70 f = self._repo.opener("merge/" + hash)
70 f = self._repo.opener("merge/" + hash)
71 self._repo.wwrite(dfile, f.read(), flags)
71 self._repo.wwrite(dfile, f.read(), flags)
72 f.close()
72 f.close()
73 fcd = wctx[dfile]
73 fcd = wctx[dfile]
74 fco = octx[ofile]
74 fco = octx[ofile]
75 fca = self._repo.filectx(afile, fileid=anode)
75 fca = self._repo.filectx(afile, fileid=anode)
76 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
76 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
77 if r is None:
77 if r is None:
78 # no real conflict
78 # no real conflict
79 del self._state[dfile]
79 del self._state[dfile]
80 elif not r:
80 elif not r:
81 self.mark(dfile, 'r')
81 self.mark(dfile, 'r')
82 return r
82 return r
83
83
84 def _checkunknown(wctx, mctx):
84 def _checkunknown(wctx, mctx):
85 "check for collisions between unknown files and files in mctx"
85 "check for collisions between unknown files and files in mctx"
86 for f in wctx.unknown():
86 for f in wctx.unknown():
87 if f in mctx and mctx[f].cmp(wctx[f]):
87 if f in mctx and mctx[f].cmp(wctx[f]):
88 raise util.Abort(_("untracked file in working directory differs"
88 raise util.Abort(_("untracked file in working directory differs"
89 " from file in requested revision: '%s'") % f)
89 " from file in requested revision: '%s'") % f)
90
90
91 def _checkcollision(mctx):
91 def _checkcollision(mctx):
92 "check for case folding collisions in the destination context"
92 "check for case folding collisions in the destination context"
93 folded = {}
93 folded = {}
94 for fn in mctx:
94 for fn in mctx:
95 fold = fn.lower()
95 fold = fn.lower()
96 if fold in folded:
96 if fold in folded:
97 raise util.Abort(_("case-folding collision between %s and %s")
97 raise util.Abort(_("case-folding collision between %s and %s")
98 % (fn, folded[fold]))
98 % (fn, folded[fold]))
99 folded[fold] = fn
99 folded[fold] = fn
100
100
101 def _forgetremoved(wctx, mctx, branchmerge):
101 def _forgetremoved(wctx, mctx, branchmerge):
102 """
102 """
103 Forget removed files
103 Forget removed files
104
104
105 If we're jumping between revisions (as opposed to merging), and if
105 If we're jumping between revisions (as opposed to merging), and if
106 neither the working directory nor the target rev has the file,
106 neither the working directory nor the target rev has the file,
107 then we need to remove it from the dirstate, to prevent the
107 then we need to remove it from the dirstate, to prevent the
108 dirstate from listing the file when it is no longer in the
108 dirstate from listing the file when it is no longer in the
109 manifest.
109 manifest.
110
110
111 If we're merging, and the other revision has removed a file
111 If we're merging, and the other revision has removed a file
112 that is not present in the working directory, we need to mark it
112 that is not present in the working directory, we need to mark it
113 as removed.
113 as removed.
114 """
114 """
115
115
116 action = []
116 action = []
117 state = branchmerge and 'r' or 'f'
117 state = branchmerge and 'r' or 'f'
118 for f in wctx.deleted():
118 for f in wctx.deleted():
119 if f not in mctx:
119 if f not in mctx:
120 action.append((f, state))
120 action.append((f, state))
121
121
122 if not branchmerge:
122 if not branchmerge:
123 for f in wctx.removed():
123 for f in wctx.removed():
124 if f not in mctx:
124 if f not in mctx:
125 action.append((f, "f"))
125 action.append((f, "f"))
126
126
127 return action
127 return action
128
128
129 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
129 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
130 """
130 """
131 Merge p1 and p2 with ancestor pa and generate merge action list
131 Merge p1 and p2 with ancestor pa and generate merge action list
132
132
133 overwrite = whether we clobber working files
133 overwrite = whether we clobber working files
134 partial = function to filter file lists
134 partial = function to filter file lists
135 """
135 """
136
136
137 def fmerge(f, f2, fa):
137 def fmerge(f, f2, fa):
138 """merge flags"""
138 """merge flags"""
139 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
139 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
140 if m == n: # flags agree
140 if m == n: # flags agree
141 return m # unchanged
141 return m # unchanged
142 if m and n and not a: # flags set, don't agree, differ from parent
142 if m and n and not a: # flags set, don't agree, differ from parent
143 r = repo.ui.promptchoice(
143 r = repo.ui.promptchoice(
144 _(" conflicting flags for %s\n"
144 _(" conflicting flags for %s\n"
145 "(n)one, e(x)ec or sym(l)ink?") % f,
145 "(n)one, e(x)ec or sym(l)ink?") % f,
146 (_("&None"), _("E&xec"), _("Sym&link")), 0)
146 (_("&None"), _("E&xec"), _("Sym&link")), 0)
147 if r == 1:
147 if r == 1:
148 return "x" # Exec
148 return "x" # Exec
149 if r == 2:
149 if r == 2:
150 return "l" # Symlink
150 return "l" # Symlink
151 return ""
151 return ""
152 if m and m != a: # changed from a to m
152 if m and m != a: # changed from a to m
153 return m
153 return m
154 if n and n != a: # changed from a to n
154 if n and n != a: # changed from a to n
155 return n
155 return n
156 return '' # flag was cleared
156 return '' # flag was cleared
157
157
158 def act(msg, m, f, *args):
158 def act(msg, m, f, *args):
159 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
159 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
160 action.append((f, m) + args)
160 action.append((f, m) + args)
161
161
162 action, copy = [], {}
162 action, copy = [], {}
163
163
164 if overwrite:
164 if overwrite:
165 pa = p1
165 pa = p1
166 elif pa == p2: # backwards
166 elif pa == p2: # backwards
167 pa = p1.p1()
167 pa = p1.p1()
168 elif pa and repo.ui.configbool("merge", "followcopies", True):
168 elif pa and repo.ui.configbool("merge", "followcopies", True):
169 dirs = repo.ui.configbool("merge", "followdirs", True)
169 dirs = repo.ui.configbool("merge", "followdirs", True)
170 copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
170 copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
171 for of, fl in diverge.iteritems():
171 for of, fl in diverge.iteritems():
172 act("divergent renames", "dr", of, fl)
172 act("divergent renames", "dr", of, fl)
173
173
174 repo.ui.note(_("resolving manifests\n"))
174 repo.ui.note(_("resolving manifests\n"))
175 repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial)))
175 repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial)))
176 repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2))
176 repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2))
177
177
178 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
178 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
179 copied = set(copy.values())
179 copied = set(copy.values())
180
180
181 if '.hgsubstate' in m1:
181 if '.hgsubstate' in m1:
182 # check whether sub state is modified
182 # check whether sub state is modified
183 for s in p1.substate:
183 for s in p1.substate:
184 if p1.sub(s).dirty():
184 if p1.sub(s).dirty():
185 m1['.hgsubstate'] += "+"
185 m1['.hgsubstate'] += "+"
186 break
186 break
187
187
188 # Compare manifests
188 # Compare manifests
189 for f, n in m1.iteritems():
189 for f, n in m1.iteritems():
190 if partial and not partial(f):
190 if partial and not partial(f):
191 continue
191 continue
192 if f in m2:
192 if f in m2:
193 rflags = fmerge(f, f, f)
193 rflags = fmerge(f, f, f)
194 a = ma.get(f, nullid)
194 a = ma.get(f, nullid)
195 if n == m2[f] or m2[f] == a: # same or local newer
195 if n == m2[f] or m2[f] == a: # same or local newer
196 # is file locally modified or flags need changing?
196 # is file locally modified or flags need changing?
197 # dirstate flags may need to be made current
197 # dirstate flags may need to be made current
198 if m1.flags(f) != rflags or n[20:]:
198 if m1.flags(f) != rflags or n[20:]:
199 act("update permissions", "e", f, rflags)
199 act("update permissions", "e", f, rflags)
200 elif n == a: # remote newer
200 elif n == a: # remote newer
201 act("remote is newer", "g", f, rflags)
201 act("remote is newer", "g", f, rflags)
202 else: # both changed
202 else: # both changed
203 act("versions differ", "m", f, f, f, rflags, False)
203 act("versions differ", "m", f, f, f, rflags, False)
204 elif f in copied: # files we'll deal with on m2 side
204 elif f in copied: # files we'll deal with on m2 side
205 pass
205 pass
206 elif f in copy:
206 elif f in copy:
207 f2 = copy[f]
207 f2 = copy[f]
208 if f2 not in m2: # directory rename
208 if f2 not in m2: # directory rename
209 act("remote renamed directory to " + f2, "d",
209 act("remote renamed directory to " + f2, "d",
210 f, None, f2, m1.flags(f))
210 f, None, f2, m1.flags(f))
211 else: # case 2 A,B/B/B or case 4,21 A/B/B
211 else: # case 2 A,B/B/B or case 4,21 A/B/B
212 act("local copied/moved to " + f2, "m",
212 act("local copied/moved to " + f2, "m",
213 f, f2, f, fmerge(f, f2, f2), False)
213 f, f2, f, fmerge(f, f2, f2), False)
214 elif f in ma: # clean, a different, no remote
214 elif f in ma: # clean, a different, no remote
215 if n != ma[f]:
215 if n != ma[f]:
216 if repo.ui.promptchoice(
216 if repo.ui.promptchoice(
217 _(" local changed %s which remote deleted\n"
217 _(" local changed %s which remote deleted\n"
218 "use (c)hanged version or (d)elete?") % f,
218 "use (c)hanged version or (d)elete?") % f,
219 (_("&Changed"), _("&Delete")), 0):
219 (_("&Changed"), _("&Delete")), 0):
220 act("prompt delete", "r", f)
220 act("prompt delete", "r", f)
221 else:
221 else:
222 act("prompt keep", "a", f)
222 act("prompt keep", "a", f)
223 elif n[20:] == "a": # added, no remote
223 elif n[20:] == "a": # added, no remote
224 act("remote deleted", "f", f)
224 act("remote deleted", "f", f)
225 elif n[20:] != "u":
225 elif n[20:] != "u":
226 act("other deleted", "r", f)
226 act("other deleted", "r", f)
227
227
228 for f, n in m2.iteritems():
228 for f, n in m2.iteritems():
229 if partial and not partial(f):
229 if partial and not partial(f):
230 continue
230 continue
231 if f in m1 or f in copied: # files already visited
231 if f in m1 or f in copied: # files already visited
232 continue
232 continue
233 if f in copy:
233 if f in copy:
234 f2 = copy[f]
234 f2 = copy[f]
235 if f2 not in m1: # directory rename
235 if f2 not in m1: # directory rename
236 act("local renamed directory to " + f2, "d",
236 act("local renamed directory to " + f2, "d",
237 None, f, f2, m2.flags(f))
237 None, f, f2, m2.flags(f))
238 elif f2 in m2: # rename case 1, A/A,B/A
238 elif f2 in m2: # rename case 1, A/A,B/A
239 act("remote copied to " + f, "m",
239 act("remote copied to " + f, "m",
240 f2, f, f, fmerge(f2, f, f2), False)
240 f2, f, f, fmerge(f2, f, f2), False)
241 else: # case 3,20 A/B/A
241 else: # case 3,20 A/B/A
242 act("remote moved to " + f, "m",
242 act("remote moved to " + f, "m",
243 f2, f, f, fmerge(f2, f, f2), True)
243 f2, f, f, fmerge(f2, f, f2), True)
244 elif f not in ma:
244 elif f not in ma:
245 act("remote created", "g", f, m2.flags(f))
245 act("remote created", "g", f, m2.flags(f))
246 elif n != ma[f]:
246 elif n != ma[f]:
247 if repo.ui.promptchoice(
247 if repo.ui.promptchoice(
248 _("remote changed %s which local deleted\n"
248 _("remote changed %s which local deleted\n"
249 "use (c)hanged version or leave (d)eleted?") % f,
249 "use (c)hanged version or leave (d)eleted?") % f,
250 (_("&Changed"), _("&Deleted")), 0) == 0:
250 (_("&Changed"), _("&Deleted")), 0) == 0:
251 act("prompt recreating", "g", f, m2.flags(f))
251 act("prompt recreating", "g", f, m2.flags(f))
252
252
253 return action
253 return action
254
254
255 def actionkey(a):
255 def actionkey(a):
256 return a[1] == 'r' and -1 or 0, a
256 return a[1] == 'r' and -1 or 0, a
257
257
258 def applyupdates(repo, action, wctx, mctx, actx, overwrite):
258 def applyupdates(repo, action, wctx, mctx, actx, overwrite):
259 """apply the merge action list to the working directory
259 """apply the merge action list to the working directory
260
260
261 wctx is the working copy context
261 wctx is the working copy context
262 mctx is the context to be merged into the working copy
262 mctx is the context to be merged into the working copy
263 actx is the context of the common ancestor
263 actx is the context of the common ancestor
264
264
265 Return a tuple of counts (updated, merged, removed, unresolved) that
265 Return a tuple of counts (updated, merged, removed, unresolved) that
266 describes how many files were affected by the update.
266 describes how many files were affected by the update.
267 """
267 """
268
268
269 updated, merged, removed, unresolved = 0, 0, 0, 0
269 updated, merged, removed, unresolved = 0, 0, 0, 0
270 ms = mergestate(repo)
270 ms = mergestate(repo)
271 ms.reset(wctx.p1().node())
271 ms.reset(wctx.p1().node())
272 moves = []
272 moves = []
273 action.sort(key=actionkey)
273 action.sort(key=actionkey)
274
274
275 # prescan for merges
275 # prescan for merges
276 u = repo.ui
276 u = repo.ui
277 for a in action:
277 for a in action:
278 f, m = a[:2]
278 f, m = a[:2]
279 if m == 'm': # merge
279 if m == 'm': # merge
280 f2, fd, flags, move = a[2:]
280 f2, fd, flags, move = a[2:]
281 if f == '.hgsubstate': # merged internally
281 if f == '.hgsubstate': # merged internally
282 continue
282 continue
283 repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
283 repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
284 fcl = wctx[f]
284 fcl = wctx[f]
285 fco = mctx[f2]
285 fco = mctx[f2]
286 if mctx == actx: # backwards, use working dir parent as ancestor
286 if mctx == actx: # backwards, use working dir parent as ancestor
287 if fcl.parents():
287 if fcl.parents():
288 fca = fcl.p1()
288 fca = fcl.p1()
289 else:
289 else:
290 fca = repo.filectx(f, fileid=nullrev)
290 fca = repo.filectx(f, fileid=nullrev)
291 else:
291 else:
292 fca = fcl.ancestor(fco, actx)
292 fca = fcl.ancestor(fco, actx)
293 if not fca:
293 if not fca:
294 fca = repo.filectx(f, fileid=nullrev)
294 fca = repo.filectx(f, fileid=nullrev)
295 ms.add(fcl, fco, fca, fd, flags)
295 ms.add(fcl, fco, fca, fd, flags)
296 if f != fd and move:
296 if f != fd and move:
297 moves.append(f)
297 moves.append(f)
298
298
299 # remove renamed files after safely stored
299 # remove renamed files after safely stored
300 for f in moves:
300 for f in moves:
301 if os.path.lexists(repo.wjoin(f)):
301 if os.path.lexists(repo.wjoin(f)):
302 repo.ui.debug("removing %s\n" % f)
302 repo.ui.debug("removing %s\n" % f)
303 os.unlink(repo.wjoin(f))
303 os.unlink(repo.wjoin(f))
304
304
305 audit_path = scmutil.pathauditor(repo.root)
305 audit_path = scmutil.pathauditor(repo.root)
306
306
307 numupdates = len(action)
307 numupdates = len(action)
308 for i, a in enumerate(action):
308 for i, a in enumerate(action):
309 f, m = a[:2]
309 f, m = a[:2]
310 u.progress(_('updating'), i + 1, item=f, total=numupdates,
310 u.progress(_('updating'), i + 1, item=f, total=numupdates,
311 unit=_('files'))
311 unit=_('files'))
312 if f and f[0] == "/":
312 if f and f[0] == "/":
313 continue
313 continue
314 if m == "r": # remove
314 if m == "r": # remove
315 repo.ui.note(_("removing %s\n") % f)
315 repo.ui.note(_("removing %s\n") % f)
316 audit_path(f)
316 audit_path(f)
317 if f == '.hgsubstate': # subrepo states need updating
317 if f == '.hgsubstate': # subrepo states need updating
318 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
318 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
319 try:
319 try:
320 util.unlinkpath(repo.wjoin(f))
320 util.unlinkpath(repo.wjoin(f))
321 except OSError, inst:
321 except OSError, inst:
322 if inst.errno != errno.ENOENT:
322 if inst.errno != errno.ENOENT:
323 repo.ui.warn(_("update failed to remove %s: %s!\n") %
323 repo.ui.warn(_("update failed to remove %s: %s!\n") %
324 (f, inst.strerror))
324 (f, inst.strerror))
325 removed += 1
325 removed += 1
326 elif m == "m": # merge
326 elif m == "m": # merge
327 if f == '.hgsubstate': # subrepo states need updating
327 if f == '.hgsubstate': # subrepo states need updating
328 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite)
328 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite)
329 continue
329 continue
330 f2, fd, flags, move = a[2:]
330 f2, fd, flags, move = a[2:]
331 r = ms.resolve(fd, wctx, mctx)
331 r = ms.resolve(fd, wctx, mctx)
332 if r is not None and r > 0:
332 if r is not None and r > 0:
333 unresolved += 1
333 unresolved += 1
334 else:
334 else:
335 if r is None:
335 if r is None:
336 updated += 1
336 updated += 1
337 else:
337 else:
338 merged += 1
338 merged += 1
339 util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
339 util.setflags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
340 if (move and repo.dirstate.normalize(fd) != f
340 if (move and repo.dirstate.normalize(fd) != f
341 and os.path.lexists(repo.wjoin(f))):
341 and os.path.lexists(repo.wjoin(f))):
342 repo.ui.debug("removing %s\n" % f)
342 repo.ui.debug("removing %s\n" % f)
343 os.unlink(repo.wjoin(f))
343 os.unlink(repo.wjoin(f))
344 elif m == "g": # get
344 elif m == "g": # get
345 flags = a[2]
345 flags = a[2]
346 repo.ui.note(_("getting %s\n") % f)
346 repo.ui.note(_("getting %s\n") % f)
347 t = mctx.filectx(f).data()
347 t = mctx.filectx(f).data()
348 repo.wwrite(f, t, flags)
348 repo.wwrite(f, t, flags)
349 t = None
349 t = None
350 updated += 1
350 updated += 1
351 if f == '.hgsubstate': # subrepo states need updating
351 if f == '.hgsubstate': # subrepo states need updating
352 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
352 subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
353 elif m == "d": # directory rename
353 elif m == "d": # directory rename
354 f2, fd, flags = a[2:]
354 f2, fd, flags = a[2:]
355 if f:
355 if f:
356 repo.ui.note(_("moving %s to %s\n") % (f, fd))
356 repo.ui.note(_("moving %s to %s\n") % (f, fd))
357 t = wctx.filectx(f).data()
357 t = wctx.filectx(f).data()
358 repo.wwrite(fd, t, flags)
358 repo.wwrite(fd, t, flags)
359 util.unlinkpath(repo.wjoin(f))
359 util.unlinkpath(repo.wjoin(f))
360 if f2:
360 if f2:
361 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
361 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
362 t = mctx.filectx(f2).data()
362 t = mctx.filectx(f2).data()
363 repo.wwrite(fd, t, flags)
363 repo.wwrite(fd, t, flags)
364 updated += 1
364 updated += 1
365 elif m == "dr": # divergent renames
365 elif m == "dr": # divergent renames
366 fl = a[2]
366 fl = a[2]
367 repo.ui.warn(_("note: possible conflict - %s was renamed "
367 repo.ui.warn(_("note: possible conflict - %s was renamed "
368 "multiple times to:\n") % f)
368 "multiple times to:\n") % f)
369 for nf in fl:
369 for nf in fl:
370 repo.ui.warn(" %s\n" % nf)
370 repo.ui.warn(" %s\n" % nf)
371 elif m == "e": # exec
371 elif m == "e": # exec
372 flags = a[2]
372 flags = a[2]
373 util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags)
373 util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
374 ms.commit()
374 ms.commit()
375 u.progress(_('updating'), None, total=numupdates, unit=_('files'))
375 u.progress(_('updating'), None, total=numupdates, unit=_('files'))
376
376
377 return updated, merged, removed, unresolved
377 return updated, merged, removed, unresolved
378
378
379 def recordupdates(repo, action, branchmerge):
379 def recordupdates(repo, action, branchmerge):
380 "record merge actions to the dirstate"
380 "record merge actions to the dirstate"
381
381
382 for a in action:
382 for a in action:
383 f, m = a[:2]
383 f, m = a[:2]
384 if m == "r": # remove
384 if m == "r": # remove
385 if branchmerge:
385 if branchmerge:
386 repo.dirstate.remove(f)
386 repo.dirstate.remove(f)
387 else:
387 else:
388 repo.dirstate.forget(f)
388 repo.dirstate.forget(f)
389 elif m == "a": # re-add
389 elif m == "a": # re-add
390 if not branchmerge:
390 if not branchmerge:
391 repo.dirstate.add(f)
391 repo.dirstate.add(f)
392 elif m == "f": # forget
392 elif m == "f": # forget
393 repo.dirstate.forget(f)
393 repo.dirstate.forget(f)
394 elif m == "e": # exec change
394 elif m == "e": # exec change
395 repo.dirstate.normallookup(f)
395 repo.dirstate.normallookup(f)
396 elif m == "g": # get
396 elif m == "g": # get
397 if branchmerge:
397 if branchmerge:
398 repo.dirstate.otherparent(f)
398 repo.dirstate.otherparent(f)
399 else:
399 else:
400 repo.dirstate.normal(f)
400 repo.dirstate.normal(f)
401 elif m == "m": # merge
401 elif m == "m": # merge
402 f2, fd, flag, move = a[2:]
402 f2, fd, flag, move = a[2:]
403 if branchmerge:
403 if branchmerge:
404 # We've done a branch merge, mark this file as merged
404 # We've done a branch merge, mark this file as merged
405 # so that we properly record the merger later
405 # so that we properly record the merger later
406 repo.dirstate.merge(fd)
406 repo.dirstate.merge(fd)
407 if f != f2: # copy/rename
407 if f != f2: # copy/rename
408 if move:
408 if move:
409 repo.dirstate.remove(f)
409 repo.dirstate.remove(f)
410 if f != fd:
410 if f != fd:
411 repo.dirstate.copy(f, fd)
411 repo.dirstate.copy(f, fd)
412 else:
412 else:
413 repo.dirstate.copy(f2, fd)
413 repo.dirstate.copy(f2, fd)
414 else:
414 else:
415 # We've update-merged a locally modified file, so
415 # We've update-merged a locally modified file, so
416 # we set the dirstate to emulate a normal checkout
416 # we set the dirstate to emulate a normal checkout
417 # of that file some time in the past. Thus our
417 # of that file some time in the past. Thus our
418 # merge will appear as a normal local file
418 # merge will appear as a normal local file
419 # modification.
419 # modification.
420 if f2 == fd: # file not locally copied/moved
420 if f2 == fd: # file not locally copied/moved
421 repo.dirstate.normallookup(fd)
421 repo.dirstate.normallookup(fd)
422 if move:
422 if move:
423 repo.dirstate.forget(f)
423 repo.dirstate.forget(f)
424 elif m == "d": # directory rename
424 elif m == "d": # directory rename
425 f2, fd, flag = a[2:]
425 f2, fd, flag = a[2:]
426 if not f2 and f not in repo.dirstate:
426 if not f2 and f not in repo.dirstate:
427 # untracked file moved
427 # untracked file moved
428 continue
428 continue
429 if branchmerge:
429 if branchmerge:
430 repo.dirstate.add(fd)
430 repo.dirstate.add(fd)
431 if f:
431 if f:
432 repo.dirstate.remove(f)
432 repo.dirstate.remove(f)
433 repo.dirstate.copy(f, fd)
433 repo.dirstate.copy(f, fd)
434 if f2:
434 if f2:
435 repo.dirstate.copy(f2, fd)
435 repo.dirstate.copy(f2, fd)
436 else:
436 else:
437 repo.dirstate.normal(fd)
437 repo.dirstate.normal(fd)
438 if f:
438 if f:
439 repo.dirstate.forget(f)
439 repo.dirstate.forget(f)
440
440
441 def update(repo, node, branchmerge, force, partial, ancestor=None):
441 def update(repo, node, branchmerge, force, partial, ancestor=None):
442 """
442 """
443 Perform a merge between the working directory and the given node
443 Perform a merge between the working directory and the given node
444
444
445 node = the node to update to, or None if unspecified
445 node = the node to update to, or None if unspecified
446 branchmerge = whether to merge between branches
446 branchmerge = whether to merge between branches
447 force = whether to force branch merging or file overwriting
447 force = whether to force branch merging or file overwriting
448 partial = a function to filter file lists (dirstate not updated)
448 partial = a function to filter file lists (dirstate not updated)
449
449
450 The table below shows all the behaviors of the update command
450 The table below shows all the behaviors of the update command
451 given the -c and -C or no options, whether the working directory
451 given the -c and -C or no options, whether the working directory
452 is dirty, whether a revision is specified, and the relationship of
452 is dirty, whether a revision is specified, and the relationship of
453 the parent rev to the target rev (linear, on the same named
453 the parent rev to the target rev (linear, on the same named
454 branch, or on another named branch).
454 branch, or on another named branch).
455
455
456 This logic is tested by test-update-branches.t.
456 This logic is tested by test-update-branches.t.
457
457
458 -c -C dirty rev | linear same cross
458 -c -C dirty rev | linear same cross
459 n n n n | ok (1) x
459 n n n n | ok (1) x
460 n n n y | ok ok ok
460 n n n y | ok ok ok
461 n n y * | merge (2) (2)
461 n n y * | merge (2) (2)
462 n y * * | --- discard ---
462 n y * * | --- discard ---
463 y n y * | --- (3) ---
463 y n y * | --- (3) ---
464 y n n * | --- ok ---
464 y n n * | --- ok ---
465 y y * * | --- (4) ---
465 y y * * | --- (4) ---
466
466
467 x = can't happen
467 x = can't happen
468 * = don't-care
468 * = don't-care
469 1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
469 1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
470 2 = abort: crosses branches (use 'hg merge' to merge or
470 2 = abort: crosses branches (use 'hg merge' to merge or
471 use 'hg update -C' to discard changes)
471 use 'hg update -C' to discard changes)
472 3 = abort: uncommitted local changes
472 3 = abort: uncommitted local changes
473 4 = incompatible options (checked in commands.py)
473 4 = incompatible options (checked in commands.py)
474
474
475 Return the same tuple as applyupdates().
475 Return the same tuple as applyupdates().
476 """
476 """
477
477
478 onode = node
478 onode = node
479 wlock = repo.wlock()
479 wlock = repo.wlock()
480 try:
480 try:
481 wc = repo[None]
481 wc = repo[None]
482 if node is None:
482 if node is None:
483 # tip of current branch
483 # tip of current branch
484 try:
484 try:
485 node = repo.branchtags()[wc.branch()]
485 node = repo.branchtags()[wc.branch()]
486 except KeyError:
486 except KeyError:
487 if wc.branch() == "default": # no default branch!
487 if wc.branch() == "default": # no default branch!
488 node = repo.lookup("tip") # update to tip
488 node = repo.lookup("tip") # update to tip
489 else:
489 else:
490 raise util.Abort(_("branch %s not found") % wc.branch())
490 raise util.Abort(_("branch %s not found") % wc.branch())
491 overwrite = force and not branchmerge
491 overwrite = force and not branchmerge
492 pl = wc.parents()
492 pl = wc.parents()
493 p1, p2 = pl[0], repo[node]
493 p1, p2 = pl[0], repo[node]
494 if ancestor:
494 if ancestor:
495 pa = repo[ancestor]
495 pa = repo[ancestor]
496 else:
496 else:
497 pa = p1.ancestor(p2)
497 pa = p1.ancestor(p2)
498
498
499 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
499 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
500
500
501 ### check phase
501 ### check phase
502 if not overwrite and len(pl) > 1:
502 if not overwrite and len(pl) > 1:
503 raise util.Abort(_("outstanding uncommitted merges"))
503 raise util.Abort(_("outstanding uncommitted merges"))
504 if branchmerge:
504 if branchmerge:
505 if pa == p2:
505 if pa == p2:
506 raise util.Abort(_("merging with a working directory ancestor"
506 raise util.Abort(_("merging with a working directory ancestor"
507 " has no effect"))
507 " has no effect"))
508 elif pa == p1:
508 elif pa == p1:
509 if p1.branch() == p2.branch():
509 if p1.branch() == p2.branch():
510 raise util.Abort(_("nothing to merge (use 'hg update'"
510 raise util.Abort(_("nothing to merge (use 'hg update'"
511 " or check 'hg heads')"))
511 " or check 'hg heads')"))
512 if not force and (wc.files() or wc.deleted()):
512 if not force and (wc.files() or wc.deleted()):
513 raise util.Abort(_("outstanding uncommitted changes "
513 raise util.Abort(_("outstanding uncommitted changes "
514 "(use 'hg status' to list changes)"))
514 "(use 'hg status' to list changes)"))
515 for s in wc.substate:
515 for s in wc.substate:
516 if wc.sub(s).dirty():
516 if wc.sub(s).dirty():
517 raise util.Abort(_("outstanding uncommitted changes in "
517 raise util.Abort(_("outstanding uncommitted changes in "
518 "subrepository '%s'") % s)
518 "subrepository '%s'") % s)
519
519
520 elif not overwrite:
520 elif not overwrite:
521 if pa == p1 or pa == p2: # linear
521 if pa == p1 or pa == p2: # linear
522 pass # all good
522 pass # all good
523 elif wc.files() or wc.deleted():
523 elif wc.files() or wc.deleted():
524 raise util.Abort(_("crosses branches (merge branches or use"
524 raise util.Abort(_("crosses branches (merge branches or use"
525 " --clean to discard changes)"))
525 " --clean to discard changes)"))
526 elif onode is None:
526 elif onode is None:
527 raise util.Abort(_("crosses branches (merge branches or use"
527 raise util.Abort(_("crosses branches (merge branches or use"
528 " --check to force update)"))
528 " --check to force update)"))
529 else:
529 else:
530 # Allow jumping branches if clean and specific rev given
530 # Allow jumping branches if clean and specific rev given
531 overwrite = True
531 overwrite = True
532
532
533 ### calculate phase
533 ### calculate phase
534 action = []
534 action = []
535 wc.status(unknown=True) # prime cache
535 wc.status(unknown=True) # prime cache
536 if not force:
536 if not force:
537 _checkunknown(wc, p2)
537 _checkunknown(wc, p2)
538 if not util.checkcase(repo.path):
538 if not util.checkcase(repo.path):
539 _checkcollision(p2)
539 _checkcollision(p2)
540 action += _forgetremoved(wc, p2, branchmerge)
540 action += _forgetremoved(wc, p2, branchmerge)
541 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
541 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
542
542
543 ### apply phase
543 ### apply phase
544 if not branchmerge: # just jump to the new rev
544 if not branchmerge: # just jump to the new rev
545 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
545 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
546 if not partial:
546 if not partial:
547 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
547 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
548
548
549 stats = applyupdates(repo, action, wc, p2, pa, overwrite)
549 stats = applyupdates(repo, action, wc, p2, pa, overwrite)
550
550
551 if not partial:
551 if not partial:
552 repo.dirstate.setparents(fp1, fp2)
552 repo.dirstate.setparents(fp1, fp2)
553 recordupdates(repo, action, branchmerge)
553 recordupdates(repo, action, branchmerge)
554 if not branchmerge:
554 if not branchmerge:
555 repo.dirstate.setbranch(p2.branch())
555 repo.dirstate.setbranch(p2.branch())
556 finally:
556 finally:
557 wlock.release()
557 wlock.release()
558
558
559 if not partial:
559 if not partial:
560 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
560 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
561 return stats
561 return stats
@@ -1,331 +1,331 b''
1 # posix.py - Posix utility function implementations for Mercurial
1 # posix.py - Posix utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import os, sys, errno, stat, getpass, pwd, grp, tempfile
9 import os, sys, errno, stat, getpass, pwd, grp, tempfile
10
10
11 posixfile = open
11 posixfile = open
12 nulldev = '/dev/null'
12 nulldev = '/dev/null'
13 normpath = os.path.normpath
13 normpath = os.path.normpath
14 samestat = os.path.samestat
14 samestat = os.path.samestat
15 os_link = os.link
15 os_link = os.link
16 unlink = os.unlink
16 unlink = os.unlink
17 rename = os.rename
17 rename = os.rename
18 expandglobs = False
18 expandglobs = False
19
19
20 umask = os.umask(0)
20 umask = os.umask(0)
21 os.umask(umask)
21 os.umask(umask)
22
22
23 def openhardlinks():
23 def openhardlinks():
24 '''return true if it is safe to hold open file handles to hardlinks'''
24 '''return true if it is safe to hold open file handles to hardlinks'''
25 return True
25 return True
26
26
27 def nlinks(name):
27 def nlinks(name):
28 '''return number of hardlinks for the given file'''
28 '''return number of hardlinks for the given file'''
29 return os.lstat(name).st_nlink
29 return os.lstat(name).st_nlink
30
30
31 def parsepatchoutput(output_line):
31 def parsepatchoutput(output_line):
32 """parses the output produced by patch and returns the filename"""
32 """parses the output produced by patch and returns the filename"""
33 pf = output_line[14:]
33 pf = output_line[14:]
34 if os.sys.platform == 'OpenVMS':
34 if os.sys.platform == 'OpenVMS':
35 if pf[0] == '`':
35 if pf[0] == '`':
36 pf = pf[1:-1] # Remove the quotes
36 pf = pf[1:-1] # Remove the quotes
37 else:
37 else:
38 if pf.startswith("'") and pf.endswith("'") and " " in pf:
38 if pf.startswith("'") and pf.endswith("'") and " " in pf:
39 pf = pf[1:-1] # Remove the quotes
39 pf = pf[1:-1] # Remove the quotes
40 return pf
40 return pf
41
41
42 def sshargs(sshcmd, host, user, port):
42 def sshargs(sshcmd, host, user, port):
43 '''Build argument list for ssh'''
43 '''Build argument list for ssh'''
44 args = user and ("%s@%s" % (user, host)) or host
44 args = user and ("%s@%s" % (user, host)) or host
45 return port and ("%s -p %s" % (args, port)) or args
45 return port and ("%s -p %s" % (args, port)) or args
46
46
47 def is_exec(f):
47 def is_exec(f):
48 """check whether a file is executable"""
48 """check whether a file is executable"""
49 return (os.lstat(f).st_mode & 0100 != 0)
49 return (os.lstat(f).st_mode & 0100 != 0)
50
50
51 def set_flags(f, l, x):
51 def setflags(f, l, x):
52 s = os.lstat(f).st_mode
52 s = os.lstat(f).st_mode
53 if l:
53 if l:
54 if not stat.S_ISLNK(s):
54 if not stat.S_ISLNK(s):
55 # switch file to link
55 # switch file to link
56 fp = open(f)
56 fp = open(f)
57 data = fp.read()
57 data = fp.read()
58 fp.close()
58 fp.close()
59 os.unlink(f)
59 os.unlink(f)
60 try:
60 try:
61 os.symlink(data, f)
61 os.symlink(data, f)
62 except OSError:
62 except OSError:
63 # failed to make a link, rewrite file
63 # failed to make a link, rewrite file
64 fp = open(f, "w")
64 fp = open(f, "w")
65 fp.write(data)
65 fp.write(data)
66 fp.close()
66 fp.close()
67 # no chmod needed at this point
67 # no chmod needed at this point
68 return
68 return
69 if stat.S_ISLNK(s):
69 if stat.S_ISLNK(s):
70 # switch link to file
70 # switch link to file
71 data = os.readlink(f)
71 data = os.readlink(f)
72 os.unlink(f)
72 os.unlink(f)
73 fp = open(f, "w")
73 fp = open(f, "w")
74 fp.write(data)
74 fp.write(data)
75 fp.close()
75 fp.close()
76 s = 0666 & ~umask # avoid restatting for chmod
76 s = 0666 & ~umask # avoid restatting for chmod
77
77
78 sx = s & 0100
78 sx = s & 0100
79 if x and not sx:
79 if x and not sx:
80 # Turn on +x for every +r bit when making a file executable
80 # Turn on +x for every +r bit when making a file executable
81 # and obey umask.
81 # and obey umask.
82 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
82 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
83 elif not x and sx:
83 elif not x and sx:
84 # Turn off all +x bits
84 # Turn off all +x bits
85 os.chmod(f, s & 0666)
85 os.chmod(f, s & 0666)
86
86
87 def checkexec(path):
87 def checkexec(path):
88 """
88 """
89 Check whether the given path is on a filesystem with UNIX-like exec flags
89 Check whether the given path is on a filesystem with UNIX-like exec flags
90
90
91 Requires a directory (like /foo/.hg)
91 Requires a directory (like /foo/.hg)
92 """
92 """
93
93
94 # VFAT on some Linux versions can flip mode but it doesn't persist
94 # VFAT on some Linux versions can flip mode but it doesn't persist
95 # a FS remount. Frequently we can detect it if files are created
95 # a FS remount. Frequently we can detect it if files are created
96 # with exec bit on.
96 # with exec bit on.
97
97
98 try:
98 try:
99 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
99 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
100 fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
100 fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
101 try:
101 try:
102 os.close(fh)
102 os.close(fh)
103 m = os.stat(fn).st_mode & 0777
103 m = os.stat(fn).st_mode & 0777
104 new_file_has_exec = m & EXECFLAGS
104 new_file_has_exec = m & EXECFLAGS
105 os.chmod(fn, m ^ EXECFLAGS)
105 os.chmod(fn, m ^ EXECFLAGS)
106 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
106 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
107 finally:
107 finally:
108 os.unlink(fn)
108 os.unlink(fn)
109 except (IOError, OSError):
109 except (IOError, OSError):
110 # we don't care, the user probably won't be able to commit anyway
110 # we don't care, the user probably won't be able to commit anyway
111 return False
111 return False
112 return not (new_file_has_exec or exec_flags_cannot_flip)
112 return not (new_file_has_exec or exec_flags_cannot_flip)
113
113
114 def checklink(path):
114 def checklink(path):
115 """check whether the given path is on a symlink-capable filesystem"""
115 """check whether the given path is on a symlink-capable filesystem"""
116 # mktemp is not racy because symlink creation will fail if the
116 # mktemp is not racy because symlink creation will fail if the
117 # file already exists
117 # file already exists
118 name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
118 name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
119 try:
119 try:
120 os.symlink(".", name)
120 os.symlink(".", name)
121 os.unlink(name)
121 os.unlink(name)
122 return True
122 return True
123 except (OSError, AttributeError):
123 except (OSError, AttributeError):
124 return False
124 return False
125
125
126 def checkosfilename(path):
126 def checkosfilename(path):
127 '''Check that the base-relative path is a valid filename on this platform.
127 '''Check that the base-relative path is a valid filename on this platform.
128 Returns None if the path is ok, or a UI string describing the problem.'''
128 Returns None if the path is ok, or a UI string describing the problem.'''
129 pass # on posix platforms, every path is ok
129 pass # on posix platforms, every path is ok
130
130
131 def set_binary(fd):
131 def set_binary(fd):
132 pass
132 pass
133
133
134 def pconvert(path):
134 def pconvert(path):
135 return path
135 return path
136
136
137 def localpath(path):
137 def localpath(path):
138 return path
138 return path
139
139
140 def samefile(fpath1, fpath2):
140 def samefile(fpath1, fpath2):
141 """Returns whether path1 and path2 refer to the same file. This is only
141 """Returns whether path1 and path2 refer to the same file. This is only
142 guaranteed to work for files, not directories."""
142 guaranteed to work for files, not directories."""
143 return os.path.samefile(fpath1, fpath2)
143 return os.path.samefile(fpath1, fpath2)
144
144
145 def samedevice(fpath1, fpath2):
145 def samedevice(fpath1, fpath2):
146 """Returns whether fpath1 and fpath2 are on the same device. This is only
146 """Returns whether fpath1 and fpath2 are on the same device. This is only
147 guaranteed to work for files, not directories."""
147 guaranteed to work for files, not directories."""
148 st1 = os.lstat(fpath1)
148 st1 = os.lstat(fpath1)
149 st2 = os.lstat(fpath2)
149 st2 = os.lstat(fpath2)
150 return st1.st_dev == st2.st_dev
150 return st1.st_dev == st2.st_dev
151
151
152 if sys.platform == 'darwin':
152 if sys.platform == 'darwin':
153 import fcntl # only needed on darwin, missing on jython
153 import fcntl # only needed on darwin, missing on jython
154 def realpath(path):
154 def realpath(path):
155 '''
155 '''
156 Returns the true, canonical file system path equivalent to the given
156 Returns the true, canonical file system path equivalent to the given
157 path.
157 path.
158
158
159 Equivalent means, in this case, resulting in the same, unique
159 Equivalent means, in this case, resulting in the same, unique
160 file system link to the path. Every file system entry, whether a file,
160 file system link to the path. Every file system entry, whether a file,
161 directory, hard link or symbolic link or special, will have a single
161 directory, hard link or symbolic link or special, will have a single
162 path preferred by the system, but may allow multiple, differing path
162 path preferred by the system, but may allow multiple, differing path
163 lookups to point to it.
163 lookups to point to it.
164
164
165 Most regular UNIX file systems only allow a file system entry to be
165 Most regular UNIX file systems only allow a file system entry to be
166 looked up by its distinct path. Obviously, this does not apply to case
166 looked up by its distinct path. Obviously, this does not apply to case
167 insensitive file systems, whether case preserving or not. The most
167 insensitive file systems, whether case preserving or not. The most
168 complex issue to deal with is file systems transparently reencoding the
168 complex issue to deal with is file systems transparently reencoding the
169 path, such as the non-standard Unicode normalisation required for HFS+
169 path, such as the non-standard Unicode normalisation required for HFS+
170 and HFSX.
170 and HFSX.
171 '''
171 '''
172 # Constants copied from /usr/include/sys/fcntl.h
172 # Constants copied from /usr/include/sys/fcntl.h
173 F_GETPATH = 50
173 F_GETPATH = 50
174 O_SYMLINK = 0x200000
174 O_SYMLINK = 0x200000
175
175
176 try:
176 try:
177 fd = os.open(path, O_SYMLINK)
177 fd = os.open(path, O_SYMLINK)
178 except OSError, err:
178 except OSError, err:
179 if err.errno == errno.ENOENT:
179 if err.errno == errno.ENOENT:
180 return path
180 return path
181 raise
181 raise
182
182
183 try:
183 try:
184 return fcntl.fcntl(fd, F_GETPATH, '\0' * 1024).rstrip('\0')
184 return fcntl.fcntl(fd, F_GETPATH, '\0' * 1024).rstrip('\0')
185 finally:
185 finally:
186 os.close(fd)
186 os.close(fd)
187 else:
187 else:
188 # Fallback to the likely inadequate Python builtin function.
188 # Fallback to the likely inadequate Python builtin function.
189 realpath = os.path.realpath
189 realpath = os.path.realpath
190
190
191 def shellquote(s):
191 def shellquote(s):
192 if os.sys.platform == 'OpenVMS':
192 if os.sys.platform == 'OpenVMS':
193 return '"%s"' % s
193 return '"%s"' % s
194 else:
194 else:
195 return "'%s'" % s.replace("'", "'\\''")
195 return "'%s'" % s.replace("'", "'\\''")
196
196
197 def quotecommand(cmd):
197 def quotecommand(cmd):
198 return cmd
198 return cmd
199
199
200 def popen(command, mode='r'):
200 def popen(command, mode='r'):
201 return os.popen(command, mode)
201 return os.popen(command, mode)
202
202
203 def testpid(pid):
203 def testpid(pid):
204 '''return False if pid dead, True if running or not sure'''
204 '''return False if pid dead, True if running or not sure'''
205 if os.sys.platform == 'OpenVMS':
205 if os.sys.platform == 'OpenVMS':
206 return True
206 return True
207 try:
207 try:
208 os.kill(pid, 0)
208 os.kill(pid, 0)
209 return True
209 return True
210 except OSError, inst:
210 except OSError, inst:
211 return inst.errno != errno.ESRCH
211 return inst.errno != errno.ESRCH
212
212
213 def explain_exit(code):
213 def explain_exit(code):
214 """return a 2-tuple (desc, code) describing a subprocess status
214 """return a 2-tuple (desc, code) describing a subprocess status
215 (codes from kill are negative - not os.system/wait encoding)"""
215 (codes from kill are negative - not os.system/wait encoding)"""
216 if code >= 0:
216 if code >= 0:
217 return _("exited with status %d") % code, code
217 return _("exited with status %d") % code, code
218 return _("killed by signal %d") % -code, -code
218 return _("killed by signal %d") % -code, -code
219
219
220 def isowner(st):
220 def isowner(st):
221 """Return True if the stat object st is from the current user."""
221 """Return True if the stat object st is from the current user."""
222 return st.st_uid == os.getuid()
222 return st.st_uid == os.getuid()
223
223
224 def find_exe(command):
224 def find_exe(command):
225 '''Find executable for command searching like which does.
225 '''Find executable for command searching like which does.
226 If command is a basename then PATH is searched for command.
226 If command is a basename then PATH is searched for command.
227 PATH isn't searched if command is an absolute or relative path.
227 PATH isn't searched if command is an absolute or relative path.
228 If command isn't found None is returned.'''
228 If command isn't found None is returned.'''
229 if sys.platform == 'OpenVMS':
229 if sys.platform == 'OpenVMS':
230 return command
230 return command
231
231
232 def findexisting(executable):
232 def findexisting(executable):
233 'Will return executable if existing file'
233 'Will return executable if existing file'
234 if os.path.exists(executable):
234 if os.path.exists(executable):
235 return executable
235 return executable
236 return None
236 return None
237
237
238 if os.sep in command:
238 if os.sep in command:
239 return findexisting(command)
239 return findexisting(command)
240
240
241 for path in os.environ.get('PATH', '').split(os.pathsep):
241 for path in os.environ.get('PATH', '').split(os.pathsep):
242 executable = findexisting(os.path.join(path, command))
242 executable = findexisting(os.path.join(path, command))
243 if executable is not None:
243 if executable is not None:
244 return executable
244 return executable
245 return None
245 return None
246
246
247 def set_signal_handler():
247 def set_signal_handler():
248 pass
248 pass
249
249
250 def statfiles(files):
250 def statfiles(files):
251 'Stat each file in files and yield stat or None if file does not exist.'
251 'Stat each file in files and yield stat or None if file does not exist.'
252 lstat = os.lstat
252 lstat = os.lstat
253 for nf in files:
253 for nf in files:
254 try:
254 try:
255 st = lstat(nf)
255 st = lstat(nf)
256 except OSError, err:
256 except OSError, err:
257 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
257 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
258 raise
258 raise
259 st = None
259 st = None
260 yield st
260 yield st
261
261
262 def getuser():
262 def getuser():
263 '''return name of current user'''
263 '''return name of current user'''
264 return getpass.getuser()
264 return getpass.getuser()
265
265
266 def expand_glob(pats):
266 def expand_glob(pats):
267 '''On Windows, expand the implicit globs in a list of patterns'''
267 '''On Windows, expand the implicit globs in a list of patterns'''
268 return list(pats)
268 return list(pats)
269
269
270 def username(uid=None):
270 def username(uid=None):
271 """Return the name of the user with the given uid.
271 """Return the name of the user with the given uid.
272
272
273 If uid is None, return the name of the current user."""
273 If uid is None, return the name of the current user."""
274
274
275 if uid is None:
275 if uid is None:
276 uid = os.getuid()
276 uid = os.getuid()
277 try:
277 try:
278 return pwd.getpwuid(uid)[0]
278 return pwd.getpwuid(uid)[0]
279 except KeyError:
279 except KeyError:
280 return str(uid)
280 return str(uid)
281
281
282 def groupname(gid=None):
282 def groupname(gid=None):
283 """Return the name of the group with the given gid.
283 """Return the name of the group with the given gid.
284
284
285 If gid is None, return the name of the current group."""
285 If gid is None, return the name of the current group."""
286
286
287 if gid is None:
287 if gid is None:
288 gid = os.getgid()
288 gid = os.getgid()
289 try:
289 try:
290 return grp.getgrgid(gid)[0]
290 return grp.getgrgid(gid)[0]
291 except KeyError:
291 except KeyError:
292 return str(gid)
292 return str(gid)
293
293
294 def groupmembers(name):
294 def groupmembers(name):
295 """Return the list of members of the group with the given
295 """Return the list of members of the group with the given
296 name, KeyError if the group does not exist.
296 name, KeyError if the group does not exist.
297 """
297 """
298 return list(grp.getgrnam(name).gr_mem)
298 return list(grp.getgrnam(name).gr_mem)
299
299
300 def spawndetached(args):
300 def spawndetached(args):
301 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
301 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
302 args[0], args)
302 args[0], args)
303
303
304 def gethgcmd():
304 def gethgcmd():
305 return sys.argv[:1]
305 return sys.argv[:1]
306
306
307 def termwidth():
307 def termwidth():
308 try:
308 try:
309 import termios, array, fcntl
309 import termios, array, fcntl
310 for dev in (sys.stderr, sys.stdout, sys.stdin):
310 for dev in (sys.stderr, sys.stdout, sys.stdin):
311 try:
311 try:
312 try:
312 try:
313 fd = dev.fileno()
313 fd = dev.fileno()
314 except AttributeError:
314 except AttributeError:
315 continue
315 continue
316 if not os.isatty(fd):
316 if not os.isatty(fd):
317 continue
317 continue
318 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
318 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
319 width = array.array('h', arri)[1]
319 width = array.array('h', arri)[1]
320 if width > 0:
320 if width > 0:
321 return width
321 return width
322 except ValueError:
322 except ValueError:
323 pass
323 pass
324 except IOError, e:
324 except IOError, e:
325 if e[0] == errno.EINVAL:
325 if e[0] == errno.EINVAL:
326 pass
326 pass
327 else:
327 else:
328 raise
328 raise
329 except ImportError:
329 except ImportError:
330 pass
330 pass
331 return 80
331 return 80
@@ -1,286 +1,286 b''
1 # windows.py - Windows utility function implementations for Mercurial
1 # windows.py - Windows utility function implementations for Mercurial
2 #
2 #
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import osutil
9 import osutil
10 import errno, msvcrt, os, re, sys
10 import errno, msvcrt, os, re, sys
11
11
12 nulldev = 'NUL:'
12 nulldev = 'NUL:'
13 umask = 002
13 umask = 002
14
14
15 # wrap osutil.posixfile to provide friendlier exceptions
15 # wrap osutil.posixfile to provide friendlier exceptions
16 def posixfile(name, mode='r', buffering=-1):
16 def posixfile(name, mode='r', buffering=-1):
17 try:
17 try:
18 return osutil.posixfile(name, mode, buffering)
18 return osutil.posixfile(name, mode, buffering)
19 except WindowsError, err:
19 except WindowsError, err:
20 raise IOError(err.errno, '%s: %s' % (name, err.strerror))
20 raise IOError(err.errno, '%s: %s' % (name, err.strerror))
21 posixfile.__doc__ = osutil.posixfile.__doc__
21 posixfile.__doc__ = osutil.posixfile.__doc__
22
22
23 class winstdout(object):
23 class winstdout(object):
24 '''stdout on windows misbehaves if sent through a pipe'''
24 '''stdout on windows misbehaves if sent through a pipe'''
25
25
26 def __init__(self, fp):
26 def __init__(self, fp):
27 self.fp = fp
27 self.fp = fp
28
28
29 def __getattr__(self, key):
29 def __getattr__(self, key):
30 return getattr(self.fp, key)
30 return getattr(self.fp, key)
31
31
32 def close(self):
32 def close(self):
33 try:
33 try:
34 self.fp.close()
34 self.fp.close()
35 except IOError:
35 except IOError:
36 pass
36 pass
37
37
38 def write(self, s):
38 def write(self, s):
39 try:
39 try:
40 # This is workaround for "Not enough space" error on
40 # This is workaround for "Not enough space" error on
41 # writing large size of data to console.
41 # writing large size of data to console.
42 limit = 16000
42 limit = 16000
43 l = len(s)
43 l = len(s)
44 start = 0
44 start = 0
45 self.softspace = 0
45 self.softspace = 0
46 while start < l:
46 while start < l:
47 end = start + limit
47 end = start + limit
48 self.fp.write(s[start:end])
48 self.fp.write(s[start:end])
49 start = end
49 start = end
50 except IOError, inst:
50 except IOError, inst:
51 if inst.errno != 0:
51 if inst.errno != 0:
52 raise
52 raise
53 self.close()
53 self.close()
54 raise IOError(errno.EPIPE, 'Broken pipe')
54 raise IOError(errno.EPIPE, 'Broken pipe')
55
55
56 def flush(self):
56 def flush(self):
57 try:
57 try:
58 return self.fp.flush()
58 return self.fp.flush()
59 except IOError, inst:
59 except IOError, inst:
60 if inst.errno != errno.EINVAL:
60 if inst.errno != errno.EINVAL:
61 raise
61 raise
62 self.close()
62 self.close()
63 raise IOError(errno.EPIPE, 'Broken pipe')
63 raise IOError(errno.EPIPE, 'Broken pipe')
64
64
65 sys.stdout = winstdout(sys.stdout)
65 sys.stdout = winstdout(sys.stdout)
66
66
67 def _is_win_9x():
67 def _is_win_9x():
68 '''return true if run on windows 95, 98 or me.'''
68 '''return true if run on windows 95, 98 or me.'''
69 try:
69 try:
70 return sys.getwindowsversion()[3] == 1
70 return sys.getwindowsversion()[3] == 1
71 except AttributeError:
71 except AttributeError:
72 return 'command' in os.environ.get('comspec', '')
72 return 'command' in os.environ.get('comspec', '')
73
73
74 def openhardlinks():
74 def openhardlinks():
75 return not _is_win_9x()
75 return not _is_win_9x()
76
76
77 def parsepatchoutput(output_line):
77 def parsepatchoutput(output_line):
78 """parses the output produced by patch and returns the filename"""
78 """parses the output produced by patch and returns the filename"""
79 pf = output_line[14:]
79 pf = output_line[14:]
80 if pf[0] == '`':
80 if pf[0] == '`':
81 pf = pf[1:-1] # Remove the quotes
81 pf = pf[1:-1] # Remove the quotes
82 return pf
82 return pf
83
83
84 def sshargs(sshcmd, host, user, port):
84 def sshargs(sshcmd, host, user, port):
85 '''Build argument list for ssh or Plink'''
85 '''Build argument list for ssh or Plink'''
86 pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
86 pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
87 args = user and ("%s@%s" % (user, host)) or host
87 args = user and ("%s@%s" % (user, host)) or host
88 return port and ("%s %s %s" % (args, pflag, port)) or args
88 return port and ("%s %s %s" % (args, pflag, port)) or args
89
89
90 def set_flags(f, l, x):
90 def setflags(f, l, x):
91 pass
91 pass
92
92
93 def checkexec(path):
93 def checkexec(path):
94 return False
94 return False
95
95
96 def checklink(path):
96 def checklink(path):
97 return False
97 return False
98
98
99 def set_binary(fd):
99 def set_binary(fd):
100 # When run without console, pipes may expose invalid
100 # When run without console, pipes may expose invalid
101 # fileno(), usually set to -1.
101 # fileno(), usually set to -1.
102 if hasattr(fd, 'fileno') and fd.fileno() >= 0:
102 if hasattr(fd, 'fileno') and fd.fileno() >= 0:
103 msvcrt.setmode(fd.fileno(), os.O_BINARY)
103 msvcrt.setmode(fd.fileno(), os.O_BINARY)
104
104
105 def pconvert(path):
105 def pconvert(path):
106 return '/'.join(path.split(os.sep))
106 return '/'.join(path.split(os.sep))
107
107
108 def localpath(path):
108 def localpath(path):
109 return path.replace('/', '\\')
109 return path.replace('/', '\\')
110
110
111 def normpath(path):
111 def normpath(path):
112 return pconvert(os.path.normpath(path))
112 return pconvert(os.path.normpath(path))
113
113
114 def realpath(path):
114 def realpath(path):
115 '''
115 '''
116 Returns the true, canonical file system path equivalent to the given
116 Returns the true, canonical file system path equivalent to the given
117 path.
117 path.
118 '''
118 '''
119 # TODO: There may be a more clever way to do this that also handles other,
119 # TODO: There may be a more clever way to do this that also handles other,
120 # less common file systems.
120 # less common file systems.
121 return os.path.normpath(os.path.normcase(os.path.realpath(path)))
121 return os.path.normpath(os.path.normcase(os.path.realpath(path)))
122
122
123 def samestat(s1, s2):
123 def samestat(s1, s2):
124 return False
124 return False
125
125
126 # A sequence of backslashes is special iff it precedes a double quote:
126 # A sequence of backslashes is special iff it precedes a double quote:
127 # - if there's an even number of backslashes, the double quote is not
127 # - if there's an even number of backslashes, the double quote is not
128 # quoted (i.e. it ends the quoted region)
128 # quoted (i.e. it ends the quoted region)
129 # - if there's an odd number of backslashes, the double quote is quoted
129 # - if there's an odd number of backslashes, the double quote is quoted
130 # - in both cases, every pair of backslashes is unquoted into a single
130 # - in both cases, every pair of backslashes is unquoted into a single
131 # backslash
131 # backslash
132 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
132 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
133 # So, to quote a string, we must surround it in double quotes, double
133 # So, to quote a string, we must surround it in double quotes, double
134 # the number of backslashes that preceed double quotes and add another
134 # the number of backslashes that preceed double quotes and add another
135 # backslash before every double quote (being careful with the double
135 # backslash before every double quote (being careful with the double
136 # quote we've appended to the end)
136 # quote we've appended to the end)
137 _quotere = None
137 _quotere = None
138 def shellquote(s):
138 def shellquote(s):
139 global _quotere
139 global _quotere
140 if _quotere is None:
140 if _quotere is None:
141 _quotere = re.compile(r'(\\*)("|\\$)')
141 _quotere = re.compile(r'(\\*)("|\\$)')
142 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
142 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
143
143
144 def quotecommand(cmd):
144 def quotecommand(cmd):
145 """Build a command string suitable for os.popen* calls."""
145 """Build a command string suitable for os.popen* calls."""
146 if sys.version_info < (2, 7, 1):
146 if sys.version_info < (2, 7, 1):
147 # Python versions since 2.7.1 do this extra quoting themselves
147 # Python versions since 2.7.1 do this extra quoting themselves
148 return '"' + cmd + '"'
148 return '"' + cmd + '"'
149 return cmd
149 return cmd
150
150
151 def popen(command, mode='r'):
151 def popen(command, mode='r'):
152 # Work around "popen spawned process may not write to stdout
152 # Work around "popen spawned process may not write to stdout
153 # under windows"
153 # under windows"
154 # http://bugs.python.org/issue1366
154 # http://bugs.python.org/issue1366
155 command += " 2> %s" % nulldev
155 command += " 2> %s" % nulldev
156 return os.popen(quotecommand(command), mode)
156 return os.popen(quotecommand(command), mode)
157
157
158 def explain_exit(code):
158 def explain_exit(code):
159 return _("exited with status %d") % code, code
159 return _("exited with status %d") % code, code
160
160
161 # if you change this stub into a real check, please try to implement the
161 # if you change this stub into a real check, please try to implement the
162 # username and groupname functions above, too.
162 # username and groupname functions above, too.
163 def isowner(st):
163 def isowner(st):
164 return True
164 return True
165
165
166 def find_exe(command):
166 def find_exe(command):
167 '''Find executable for command searching like cmd.exe does.
167 '''Find executable for command searching like cmd.exe does.
168 If command is a basename then PATH is searched for command.
168 If command is a basename then PATH is searched for command.
169 PATH isn't searched if command is an absolute or relative path.
169 PATH isn't searched if command is an absolute or relative path.
170 An extension from PATHEXT is found and added if not present.
170 An extension from PATHEXT is found and added if not present.
171 If command isn't found None is returned.'''
171 If command isn't found None is returned.'''
172 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
172 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
173 pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
173 pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
174 if os.path.splitext(command)[1].lower() in pathexts:
174 if os.path.splitext(command)[1].lower() in pathexts:
175 pathexts = ['']
175 pathexts = ['']
176
176
177 def findexisting(pathcommand):
177 def findexisting(pathcommand):
178 'Will append extension (if needed) and return existing file'
178 'Will append extension (if needed) and return existing file'
179 for ext in pathexts:
179 for ext in pathexts:
180 executable = pathcommand + ext
180 executable = pathcommand + ext
181 if os.path.exists(executable):
181 if os.path.exists(executable):
182 return executable
182 return executable
183 return None
183 return None
184
184
185 if os.sep in command:
185 if os.sep in command:
186 return findexisting(command)
186 return findexisting(command)
187
187
188 for path in os.environ.get('PATH', '').split(os.pathsep):
188 for path in os.environ.get('PATH', '').split(os.pathsep):
189 executable = findexisting(os.path.join(path, command))
189 executable = findexisting(os.path.join(path, command))
190 if executable is not None:
190 if executable is not None:
191 return executable
191 return executable
192 return findexisting(os.path.expanduser(os.path.expandvars(command)))
192 return findexisting(os.path.expanduser(os.path.expandvars(command)))
193
193
194 def statfiles(files):
194 def statfiles(files):
195 '''Stat each file in files and yield stat or None if file does not exist.
195 '''Stat each file in files and yield stat or None if file does not exist.
196 Cluster and cache stat per directory to minimize number of OS stat calls.'''
196 Cluster and cache stat per directory to minimize number of OS stat calls.'''
197 ncase = os.path.normcase
197 ncase = os.path.normcase
198 dircache = {} # dirname -> filename -> status | None if file does not exist
198 dircache = {} # dirname -> filename -> status | None if file does not exist
199 for nf in files:
199 for nf in files:
200 nf = ncase(nf)
200 nf = ncase(nf)
201 dir, base = os.path.split(nf)
201 dir, base = os.path.split(nf)
202 if not dir:
202 if not dir:
203 dir = '.'
203 dir = '.'
204 cache = dircache.get(dir, None)
204 cache = dircache.get(dir, None)
205 if cache is None:
205 if cache is None:
206 try:
206 try:
207 dmap = dict([(ncase(n), s)
207 dmap = dict([(ncase(n), s)
208 for n, k, s in osutil.listdir(dir, True)])
208 for n, k, s in osutil.listdir(dir, True)])
209 except OSError, err:
209 except OSError, err:
210 # handle directory not found in Python version prior to 2.5
210 # handle directory not found in Python version prior to 2.5
211 # Python <= 2.4 returns native Windows code 3 in errno
211 # Python <= 2.4 returns native Windows code 3 in errno
212 # Python >= 2.5 returns ENOENT and adds winerror field
212 # Python >= 2.5 returns ENOENT and adds winerror field
213 # EINVAL is raised if dir is not a directory.
213 # EINVAL is raised if dir is not a directory.
214 if err.errno not in (3, errno.ENOENT, errno.EINVAL,
214 if err.errno not in (3, errno.ENOENT, errno.EINVAL,
215 errno.ENOTDIR):
215 errno.ENOTDIR):
216 raise
216 raise
217 dmap = {}
217 dmap = {}
218 cache = dircache.setdefault(dir, dmap)
218 cache = dircache.setdefault(dir, dmap)
219 yield cache.get(base, None)
219 yield cache.get(base, None)
220
220
221 def username(uid=None):
221 def username(uid=None):
222 """Return the name of the user with the given uid.
222 """Return the name of the user with the given uid.
223
223
224 If uid is None, return the name of the current user."""
224 If uid is None, return the name of the current user."""
225 return None
225 return None
226
226
227 def groupname(gid=None):
227 def groupname(gid=None):
228 """Return the name of the group with the given gid.
228 """Return the name of the group with the given gid.
229
229
230 If gid is None, return the name of the current group."""
230 If gid is None, return the name of the current group."""
231 return None
231 return None
232
232
233 def _removedirs(name):
233 def _removedirs(name):
234 """special version of os.removedirs that does not remove symlinked
234 """special version of os.removedirs that does not remove symlinked
235 directories or junction points if they actually contain files"""
235 directories or junction points if they actually contain files"""
236 if osutil.listdir(name):
236 if osutil.listdir(name):
237 return
237 return
238 os.rmdir(name)
238 os.rmdir(name)
239 head, tail = os.path.split(name)
239 head, tail = os.path.split(name)
240 if not tail:
240 if not tail:
241 head, tail = os.path.split(head)
241 head, tail = os.path.split(head)
242 while head and tail:
242 while head and tail:
243 try:
243 try:
244 if osutil.listdir(head):
244 if osutil.listdir(head):
245 return
245 return
246 os.rmdir(head)
246 os.rmdir(head)
247 except (ValueError, OSError):
247 except (ValueError, OSError):
248 break
248 break
249 head, tail = os.path.split(head)
249 head, tail = os.path.split(head)
250
250
251 def unlinkpath(f):
251 def unlinkpath(f):
252 """unlink and remove the directory if it is empty"""
252 """unlink and remove the directory if it is empty"""
253 unlink(f)
253 unlink(f)
254 # try removing directories that might now be empty
254 # try removing directories that might now be empty
255 try:
255 try:
256 _removedirs(os.path.dirname(f))
256 _removedirs(os.path.dirname(f))
257 except OSError:
257 except OSError:
258 pass
258 pass
259
259
260 def rename(src, dst):
260 def rename(src, dst):
261 '''atomically rename file src to dst, replacing dst if it exists'''
261 '''atomically rename file src to dst, replacing dst if it exists'''
262 try:
262 try:
263 os.rename(src, dst)
263 os.rename(src, dst)
264 except OSError, e:
264 except OSError, e:
265 if e.errno != errno.EEXIST:
265 if e.errno != errno.EEXIST:
266 raise
266 raise
267 unlink(dst)
267 unlink(dst)
268 os.rename(src, dst)
268 os.rename(src, dst)
269
269
270 def gethgcmd():
270 def gethgcmd():
271 return [sys.executable] + sys.argv[:1]
271 return [sys.executable] + sys.argv[:1]
272
272
273 def termwidth():
273 def termwidth():
274 # cmd.exe does not handle CR like a unix console, the CR is
274 # cmd.exe does not handle CR like a unix console, the CR is
275 # counted in the line length. On 80 columns consoles, if 80
275 # counted in the line length. On 80 columns consoles, if 80
276 # characters are written, the following CR won't apply on the
276 # characters are written, the following CR won't apply on the
277 # current line but on the new one. Keep room for it.
277 # current line but on the new one. Keep room for it.
278 return 79
278 return 79
279
279
280 def groupmembers(name):
280 def groupmembers(name):
281 # Don't support groups on Windows for now
281 # Don't support groups on Windows for now
282 raise KeyError()
282 raise KeyError()
283
283
284 from win32 import *
284 from win32 import *
285
285
286 expandglobs = True
286 expandglobs = True
General Comments 0
You need to be logged in to leave comments. Login now