commit: move 'nothing changed' test into commit()
Matt Mackall
r8404:a2bc39ad default
@@ -1,2137 +1,2136 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset'))
    supported = set('revlogv1 store fncache'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if baseui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if baseui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.baseui = baseui
        self.ui = baseui.copy()
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid == None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

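    # A minimal usage sketch (path and tag name hypothetical), assuming a
    # repository opened through the standard hg.repository() API:
    #
    #   >>> from mercurial import ui as uimod, hg
    #   >>> repo = hg.repository(uimod.ui(), '/path/to/repo')
    #   >>> repo.tag('v1.0', repo.lookup('tip'), 'Added tag v1.0',
    #   ...          False, 'user@example.com', None)
    #
    # With local=False this appends to .hgtags and commits a new changeset;
    # with local=True it only writes .hg/localtags.
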
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

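    # Sketch of the three possible results (tag names hypothetical):
    #
    #   >>> repo.tagtype('v1.0')    # committed via .hgtags
    #   'global'
    #   >>> repo.tagtype('wip')     # created with --local, in .hg/localtags
    #   'local'
    #   >>> repo.tagtype('nosuch')  # unknown tags yield None
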
    def _hgtagsnodes(self):
        last = {}
        ret = []
        for node in reversed(self.heads()):
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except error.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def _branchheads(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self._branchheads().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt

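    # Sketch: branchtags() walks each branch's heads newest-first and keeps
    # the first head whose changeset extra dict lacks 'close', so an open
    # head wins over a later closed one (branch names hypothetical):
    #
    #   >>> bt = repo.branchtags()
    #   >>> sorted(bt.keys())
    #   ['default', 'stable']
    #   >>> bt['default'] == repo.lookup('default')
    #   True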

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

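    # The cache parsed above is a plain text file: the first line records the
    # tip node and revision the cache was computed against, and every other
    # line is one "<node-hex> <branchname>" pair. A sketch with shortened,
    # made-up hashes:
    #
    #   a225178df422... 1502
    #   8c4d2fda3259... default
    #   1c28ed8e1c24... stable
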
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            bheads = partial.setdefault(b, [])
            bheads.append(c.node())
            for p in c.parents():
                pn = p.node()
                if pn in bheads:
                    bheads.remove(pn)

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)

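    # Sketch of the resolution order lookup() implements, from cheapest to
    # most ambiguous (example keys hypothetical):
    #
    #   >>> repo.lookup(0)           # revision number
    #   >>> repo.lookup('.')         # first working directory parent
    #   >>> repo.lookup('tip')       # special names, then full hashes
    #   >>> repo.lookup('v1.0')      # tags win over branch names
    #   >>> repo.lookup('a2bc39ad')  # finally, unambiguous hash prefixes
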
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

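    # Sketch: for a rename reported by fctx.renamed(), the filelog entry is
    # written with copy metadata in place of a real first parent, roughly:
    #
    #   meta = {'copy': 'foo', 'copyrev': hex(crev)}   # names hypothetical
    #   flog.add(text, meta, tr, linkrev, nullid, newfparent)
    #
    # The nullid first parent is the signal to "look up the copy data" when
    # the graphs sketched above are merged later.
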
    def commit(self, files=None, text="", user=None, date=None, match=None,
               force=False, force_editor=False, extra={}, empty_ok=False):
        wlock = lock = None
        if extra.get("close"):
            force = True
        if files:
            files = list(set(files))
        try:
            wlock = self.wlock()
            lock = self.lock()

            p1, p2 = self.dirstate.parents()

            if (not force and p2 != nullid and
                (match and (match.files() or match.anypats()))):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            if files:
                modified, removed = [], []
                for f in files:
                    s = self.dirstate[f]
                    if s in 'nma':
                        modified.append(f)
                    elif s == 'r':
                        removed.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
                changes = [modified, [], removed, [], []]
            else:
                changes = self.status(match=match)

+           if (not (changes[0] or changes[1] or changes[2])
+               and not force and p2 == nullid and
+               self[None].branch() == self['.'].branch()):
+               self.ui.status(_("nothing changed\n"))
+               return None
+
            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            r = self._commitctx(wctx, force, force_editor, empty_ok, True)
            ms.reset()
            return r

        finally:
            release(lock, wlock)

    def commitctx(self, ctx):
        """Add a new revision to current repository.

        Revision information is passed in the context.memctx argument.
        commitctx() does not touch the working directory.
        """
        lock = self.lock()
        try:
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, working=False)
        finally:
            lock.release()

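    # Sketch of feeding commitctx() a memctx built in memory (file names,
    # message and callback are hypothetical; signatures as assumed from the
    # context module of this era):
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(path, 'new contents\n',
    #                                 False, False, None)
    #   ctx = context.memctx(repo, (p1, p2), 'import: add a.txt',
    #                        ['a.txt'], getfilectx)
    #   node = repo.commitctx(ctx)
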
829 def _commitctx(self, ctx, force=False, force_editor=False, empty_ok=False,
835 def _commitctx(self, ctx, force=False, force_editor=False, empty_ok=False,
830 working=True):
836 working=True):
831 tr = None
837 tr = None
832 valid = 0 # don't save the dirstate if this isn't set
838 valid = 0 # don't save the dirstate if this isn't set
833 try:
839 try:
834 commit = sorted(ctx.modified() + ctx.added())
840 commit = sorted(ctx.modified() + ctx.added())
835 remove = ctx.removed()
841 remove = ctx.removed()
836 extra = ctx.extra().copy()
842 extra = ctx.extra().copy()
837 branchname = extra['branch']
843 branchname = extra['branch']
838 user = ctx.user()
844 user = ctx.user()
839 text = ctx.description()
845 text = ctx.description()
840
846
841 p1, p2 = [p.node() for p in ctx.parents()]
847 p1, p2 = [p.node() for p in ctx.parents()]
842 c1 = self.changelog.read(p1)
848 c1 = self.changelog.read(p1)
843 c2 = self.changelog.read(p2)
849 c2 = self.changelog.read(p2)
844 m1 = self.manifest.read(c1[0]).copy()
850 m1 = self.manifest.read(c1[0]).copy()
845 m2 = self.manifest.read(c2[0])
851 m2 = self.manifest.read(c2[0])
846
852
847 if working:
848 oldname = c1[5].get("branch") # stored in UTF-8
849 if (not commit and not remove and not force and p2 == nullid
850 and branchname == oldname):
851 self.ui.status(_("nothing changed\n"))
852 return None
853
854 xp1 = hex(p1)
853 xp1 = hex(p1)
855 if p2 == nullid: xp2 = ''
854 if p2 == nullid: xp2 = ''
856 else: xp2 = hex(p2)
855 else: xp2 = hex(p2)
857
856
858 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
857 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
859
858
860 tr = self.transaction()
859 tr = self.transaction()
861 trp = weakref.proxy(tr)
860 trp = weakref.proxy(tr)
862
861
863 # check in files
862 # check in files
864 new = {}
863 new = {}
865 changed = []
864 changed = []
866 linkrev = len(self)
865 linkrev = len(self)
867 for f in commit:
866 for f in commit:
868 self.ui.note(f + "\n")
867 self.ui.note(f + "\n")
869 try:
868 try:
870 fctx = ctx[f]
869 fctx = ctx[f]
871 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
870 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
872 changed)
871 changed)
873 m1.set(f, fctx.flags())
872 m1.set(f, fctx.flags())
874 if working:
873 if working:
875 self.dirstate.normal(f)
874 self.dirstate.normal(f)
876
875
877 except (OSError, IOError):
876 except (OSError, IOError):
878 if working:
877 if working:
879 self.ui.warn(_("trouble committing %s!\n") % f)
878 self.ui.warn(_("trouble committing %s!\n") % f)
880 raise
879 raise
881 else:
880 else:
882 remove.append(f)
881 remove.append(f)
883
882
884 updated, added = [], []
883 updated, added = [], []
885 for f in sorted(changed):
884 for f in sorted(changed):
886 if f in m1 or f in m2:
885 if f in m1 or f in m2:
887 updated.append(f)
886 updated.append(f)
888 else:
887 else:
889 added.append(f)
888 added.append(f)
890
889
891 # update manifest
890 # update manifest
892 m1.update(new)
891 m1.update(new)
893 removed = [f for f in sorted(remove) if f in m1 or f in m2]
892 removed = [f for f in sorted(remove) if f in m1 or f in m2]
894 removed1 = []
893 removed1 = []
895
894
896 for f in removed:
895 for f in removed:
897 if f in m1:
896 if f in m1:
898 del m1[f]
897 del m1[f]
899 removed1.append(f)
898 removed1.append(f)
900 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
899 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
901 (new, removed1))
900 (new, removed1))
902
901
903 # add changeset
902 # add changeset
904 if (not empty_ok and not text) or force_editor:
903 if (not empty_ok and not text) or force_editor:
905 edittext = []
904 edittext = []
906 if text:
905 if text:
907 edittext.append(text)
906 edittext.append(text)
908 edittext.append("")
907 edittext.append("")
909 edittext.append("") # Empty line between message and comments.
908 edittext.append("") # Empty line between message and comments.
910 edittext.append(_("HG: Enter commit message."
909 edittext.append(_("HG: Enter commit message."
911 " Lines beginning with 'HG:' are removed."))
910 " Lines beginning with 'HG:' are removed."))
912 edittext.append("HG: --")
911 edittext.append("HG: --")
913 edittext.append(_("HG: user: %s") % user)
912 edittext.append(_("HG: user: %s") % user)
914 if p2 != nullid:
913 if p2 != nullid:
915 edittext.append(_("HG: branch merge"))
914 edittext.append(_("HG: branch merge"))
916 if branchname:
915 if branchname:
917 edittext.append(_("HG: branch '%s'")
916 edittext.append(_("HG: branch '%s'")
918 % encoding.tolocal(branchname))
917 % encoding.tolocal(branchname))
919 edittext.extend([_("HG: added %s") % f for f in added])
918 edittext.extend([_("HG: added %s") % f for f in added])
920 edittext.extend([_("HG: changed %s") % f for f in updated])
919 edittext.extend([_("HG: changed %s") % f for f in updated])
921 edittext.extend([_("HG: removed %s") % f for f in removed])
920 edittext.extend([_("HG: removed %s") % f for f in removed])
922 if not added and not updated and not removed:
921 if not added and not updated and not removed:
923 edittext.append(_("HG: no files changed"))
922 edittext.append(_("HG: no files changed"))
924 edittext.append("")
923 edittext.append("")
925 # run editor in the repository root
924 # run editor in the repository root
926 olddir = os.getcwd()
925 olddir = os.getcwd()
927 os.chdir(self.root)
926 os.chdir(self.root)
928 text = self.ui.edit("\n".join(edittext), user)
927 text = self.ui.edit("\n".join(edittext), user)
929 os.chdir(olddir)
928 os.chdir(olddir)
930
929
931 lines = [line.rstrip() for line in text.rstrip().splitlines()]
930 lines = [line.rstrip() for line in text.rstrip().splitlines()]
932 while lines and not lines[0]:
931 while lines and not lines[0]:
933 del lines[0]
932 del lines[0]
934 if not lines and working:
933 if not lines and working:
935 raise util.Abort(_("empty commit message"))
934 raise util.Abort(_("empty commit message"))
936 text = '\n'.join(lines)
935 text = '\n'.join(lines)
937
936
938 self.changelog.delayupdate()
937 self.changelog.delayupdate()
939 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
938 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
940 user, ctx.date(), extra)
939 user, ctx.date(), extra)
941 p = lambda: self.changelog.writepending() and self.root or ""
940 p = lambda: self.changelog.writepending() and self.root or ""
942 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
941 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
943 parent2=xp2, pending=p)
942 parent2=xp2, pending=p)
944 self.changelog.finalize(trp)
943 self.changelog.finalize(trp)
945 tr.close()
944 tr.close()
946
945
947 if self.branchcache:
946 if self.branchcache:
948 self.branchtags()
947 self.branchtags()
949
948
950 if working:
949 if working:
951 self.dirstate.setparents(n)
950 self.dirstate.setparents(n)
952 for f in removed:
951 for f in removed:
953 self.dirstate.forget(f)
952 self.dirstate.forget(f)
954 valid = 1 # our dirstate updates are complete
953 valid = 1 # our dirstate updates are complete
955
954
956 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
955 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
957 return n
956 return n
958 finally:
957 finally:
959 if not valid: # don't save our updated dirstate
958 if not valid: # don't save our updated dirstate
960 self.dirstate.invalidate()
959 self.dirstate.invalidate()
961 del tr
960 del tr
962
961
963 def walk(self, match, node=None):
962 def walk(self, match, node=None):
964 '''
963 '''
965 walk recursively through the directory tree or a given
964 walk recursively through the directory tree or a given
966 changeset, finding all files matched by the match
965 changeset, finding all files matched by the match
967 function
966 function
968 '''
967 '''
969 return self[node].walk(match)
968 return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # updating the dirstate is optional
                            # so we don't wait on the lock
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except error.LockError:
                            pass
                    finally:
                        release(wlock)

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
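
        # Illustrative call (hypothetical): the 7-tuple unpacks as
        #   modified, added, removed, deleted, unknown, ignored, clean = \
        #       repo.status(ignored=True, clean=True, unknown=True)
        # where the last three lists stay empty unless the corresponding
        # flags are passed.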

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()
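
        # Hypothetical caller sketch: names are repo-relative, and the
        # return value lists the names that could not be added:
        #   rejected = repo.add(['a.txt', 'missing.txt'])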

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            release(wlock)

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None, closed=True):
        heads = self.changelog.heads(start)
        def display(head):
            if closed:
                return True
            extras = self.changelog.read(head)[5]
            return ('close' not in extras)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
        return [n for (r, n) in sorted(heads)]
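
        # The (-rev, node) decoration above sorts newest-first: e.g. head
        # revisions [2, 5, 3] become keys [-2, -5, -3], which sort to
        # [-5, -3, -2] and unpack to the heads at revisions 5, 3, 2.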

    def branchheads(self, branch=None, start=None, closed=True):
        if branch is None:
            branch = self[None].branch()
        branches = self._branchheads()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
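
        # The i == f test above samples a node only at distances 1, 2, 4,
        # 8, ... below 'top', so each returned list is an exponentially
        # spaced skeleton of the first-parent chain; e.g. a segment of
        # length 10 reports the nodes at distances 1, 2, 4 and 8.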

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]
1254
1253
1255 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1254 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1256 """Return a tuple (common, missing roots, heads) used to identify
1255 """Return a tuple (common, missing roots, heads) used to identify
1257 missing nodes from remote.
1256 missing nodes from remote.
1258
1257
1259 If base dict is specified, assume that these nodes and their parents
1258 If base dict is specified, assume that these nodes and their parents
1260 exist on the remote side and that no child of a node of base exists
1259 exist on the remote side and that no child of a node of base exists
1261 in both remote and self.
1260 in both remote and self.
1262 Furthermore base will be updated to include the nodes that exists
1261 Furthermore base will be updated to include the nodes that exists
1263 in self and remote but no children exists in self and remote.
1262 in self and remote but no children exists in self and remote.
1264 If a list of heads is specified, return only nodes which are heads
1263 If a list of heads is specified, return only nodes which are heads
1265 or ancestors of these heads.
1264 or ancestors of these heads.
1266
1265
1267 All the ancestors of base are in self and in remote.
1266 All the ancestors of base are in self and in remote.
1268 """
1267 """
1269 m = self.changelog.nodemap
1268 m = self.changelog.nodemap
1270 search = []
1269 search = []
1271 fetch = set()
1270 fetch = set()
1272 seen = set()
1271 seen = set()
1273 seenbranch = set()
1272 seenbranch = set()
1274 if base == None:
1273 if base == None:
1275 base = {}
1274 base = {}
1276
1275
1277 if not heads:
1276 if not heads:
1278 heads = remote.heads()
1277 heads = remote.heads()
1279
1278
1280 if self.changelog.tip() == nullid:
1279 if self.changelog.tip() == nullid:
1281 base[nullid] = 1
1280 base[nullid] = 1
1282 if heads != [nullid]:
1281 if heads != [nullid]:
1283 return [nullid], [nullid], list(heads)
1282 return [nullid], [nullid], list(heads)
1284 return [nullid], [], []
1283 return [nullid], [], []
1285
1284
1286 # assume we're closer to the tip than the root
1285 # assume we're closer to the tip than the root
1287 # and start by examining the heads
1286 # and start by examining the heads
1288 self.ui.status(_("searching for changes\n"))
1287 self.ui.status(_("searching for changes\n"))
1289
1288
1290 unknown = []
1289 unknown = []
1291 for h in heads:
1290 for h in heads:
1292 if h not in m:
1291 if h not in m:
1293 unknown.append(h)
1292 unknown.append(h)
1294 else:
1293 else:
1295 base[h] = 1
1294 base[h] = 1
1296
1295
1297 heads = unknown
1296 heads = unknown
1298 if not unknown:
1297 if not unknown:
1299 return base.keys(), [], []
1298 return base.keys(), [], []
1300
1299
1301 req = set(unknown)
1300 req = set(unknown)
1302 reqcnt = 0
1301 reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)
        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                            break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads
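
        # In protocol terms (summary, not original code): discovery costs one
        # heads() call, branches() calls batched ten segments at a time, and
        # one between() round per binary-search pass; reqcnt tallies these
        # for the "%d total queries" debug line above.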

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
        self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
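
        # Worked example of the new-head check above (hypothetical nodes):
        # pushing local heads {A, B} where the remote's only head R is an
        # ancestor of A drops R from newheads (an outgoing head descends
        # from it), leaving newheads == [A, B]; two heads against one remote
        # head trips the warning, so the push is refused unless forced.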

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Look up the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode
1805
1804
1806 # Now that we have all theses utility functions to help out and
1805 # Now that we have all theses utility functions to help out and
1807 # logically divide up the task, generate the group.
1806 # logically divide up the task, generate the group.
1808 def gengroup():
1807 def gengroup():
1809 # The set of changed files starts empty.
1808 # The set of changed files starts empty.
1810 changedfiles = {}
1809 changedfiles = {}
1811 # Create a changenode group generator that will call our functions
1810 # Create a changenode group generator that will call our functions
1812 # back to lookup the owning changenode and collect information.
1811 # back to lookup the owning changenode and collect information.
1813 group = cl.group(msng_cl_lst, identity,
1812 group = cl.group(msng_cl_lst, identity,
1814 manifest_and_file_collector(changedfiles))
1813 manifest_and_file_collector(changedfiles))
1815 for chnk in group:
1814 for chnk in group:
1816 yield chnk
1815 yield chnk
1817
1816
1818 # The list of manifests has been collected by the generator
1817 # The list of manifests has been collected by the generator
1819 # calling our functions back.
1818 # calling our functions back.
1820 prune_manifests()
1819 prune_manifests()
1821 add_extra_nodes(1, msng_mnfst_set)
1820 add_extra_nodes(1, msng_mnfst_set)
1822 msng_mnfst_lst = msng_mnfst_set.keys()
1821 msng_mnfst_lst = msng_mnfst_set.keys()
1823 # Sort the manifestnodes by revision number.
1822 # Sort the manifestnodes by revision number.
1824 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1823 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1825 # Create a generator for the manifestnodes that calls our lookup
1824 # Create a generator for the manifestnodes that calls our lookup
1826 # and data collection functions back.
1825 # and data collection functions back.
1827 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1826 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1828 filenode_collector(changedfiles))
1827 filenode_collector(changedfiles))
1829 for chnk in group:
1828 for chnk in group:
1830 yield chnk
1829 yield chnk
1831
1830
1832 # These are no longer needed, dereference and toss the memory for
1831 # These are no longer needed, dereference and toss the memory for
1833 # them.
1832 # them.
1834 msng_mnfst_lst = None
1833 msng_mnfst_lst = None
1835 msng_mnfst_set.clear()
1834 msng_mnfst_set.clear()
1836
1835
1837 if extranodes:
1836 if extranodes:
1838 for fname in extranodes:
1837 for fname in extranodes:
1839 if isinstance(fname, int):
1838 if isinstance(fname, int):
1840 continue
1839 continue
1841 msng_filenode_set.setdefault(fname, {})
1840 msng_filenode_set.setdefault(fname, {})
1842 changedfiles[fname] = 1
1841 changedfiles[fname] = 1
1843 # Go through all our files in order sorted by name.
1842 # Go through all our files in order sorted by name.
1844 for fname in sorted(changedfiles):
1843 for fname in sorted(changedfiles):
1845 filerevlog = self.file(fname)
1844 filerevlog = self.file(fname)
1846 if not len(filerevlog):
1845 if not len(filerevlog):
1847 raise util.Abort(_("empty or missing revlog for %s") % fname)
1846 raise util.Abort(_("empty or missing revlog for %s") % fname)
1848 # Toss out the filenodes that the recipient isn't really
1847 # Toss out the filenodes that the recipient isn't really
1849 # missing.
1848 # missing.
1850 if fname in msng_filenode_set:
1849 if fname in msng_filenode_set:
1851 prune_filenodes(fname, filerevlog)
1850 prune_filenodes(fname, filerevlog)
1852 add_extra_nodes(fname, msng_filenode_set[fname])
1851 add_extra_nodes(fname, msng_filenode_set[fname])
1853 msng_filenode_lst = msng_filenode_set[fname].keys()
1852 msng_filenode_lst = msng_filenode_set[fname].keys()
1854 else:
1853 else:
1855 msng_filenode_lst = []
1854 msng_filenode_lst = []
1856 # If any filenodes are left, generate the group for them,
1855 # If any filenodes are left, generate the group for them,
1857 # otherwise don't bother.
1856 # otherwise don't bother.
1858 if len(msng_filenode_lst) > 0:
1857 if len(msng_filenode_lst) > 0:
1859 yield changegroup.chunkheader(len(fname))
1858 yield changegroup.chunkheader(len(fname))
1860 yield fname
1859 yield fname
1861 # Sort the filenodes by their revision #
1860 # Sort the filenodes by their revision #
1862 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1861 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1863 # Create a group generator and only pass in a changenode
1862 # Create a group generator and only pass in a changenode
1864 # lookup function as we need to collect no information
1863 # lookup function as we need to collect no information
1865 # from filenodes.
1864 # from filenodes.
1866 group = filerevlog.group(msng_filenode_lst,
1865 group = filerevlog.group(msng_filenode_lst,
1867 lookup_filenode_link_func(fname))
1866 lookup_filenode_link_func(fname))
1868 for chnk in group:
1867 for chnk in group:
1869 yield chnk
1868 yield chnk
1870 if fname in msng_filenode_set:
1869 if fname in msng_filenode_set:
1871 # Don't need this anymore, toss it to free memory.
1870 # Don't need this anymore, toss it to free memory.
1872 del msng_filenode_set[fname]
1871 del msng_filenode_set[fname]
1873 # Signal that no more groups are left.
1872 # Signal that no more groups are left.
1874 yield changegroup.closechunk()
1873 yield changegroup.closechunk()
1875
1874
1876 if msng_cl_lst:
1875 if msng_cl_lst:
1877 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1876 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1878
1877
1879 return util.chunkbuffer(gengroup())
1878 return util.chunkbuffer(gengroup())
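
# Illustrative standalone sketch, not part of localrepo.py: the stream that
# gengroup() yields is framed as length-prefixed chunks.  changegroup.chunkheader()
# emits a 4-byte big-endian length that counts the 4 length bytes themselves,
# and changegroup.closechunk() emits a zero length to terminate a group.  A
# minimal reader for that framing, assuming a file-like object of raw bytes:

import io, struct

def read_chunks(fp):
    """Yield chunk payloads until the zero-length terminator is seen."""
    while True:
        length = struct.unpack(">l", fp.read(4))[0]
        if length <= 4:                 # closechunk(): end of this group
            return
        yield fp.read(length - 4)       # payload excludes the 4 header bytes

# A one-chunk group followed by a terminator:
stream = io.BytesIO(struct.pack(">l", 9) + b"hello" + struct.pack(">l", 0))
assert list(read_chunks(stream)) == [b"hello"]

# The full changegroup is the changelog group, the manifest group, and then,
# for each changed file, a filename chunk followed by that file's delta group.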

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
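
# Illustrative standalone sketch (toy objects, not the revlog API): the
# gennodelst/lookuprevlink_func helpers above walk a revlog, keep only the
# revisions whose linkrev lies in the outgoing changelog revision set, and
# map each kept node back to the changelog revision that introduced it.

class ToyLog(object):
    """Minimal stand-in exposing the calls gennodelst-style code uses."""
    def __init__(self, entries):
        self._entries = entries        # list of (node, linkrev), rev order
    def __iter__(self):
        return iter(range(len(self._entries)))
    def node(self, rev):
        return self._entries[rev][0]
    def linkrev(self, rev):
        return self._entries[rev][1]

log = ToyLog([("n0", 0), ("n1", 2), ("n2", 5)])
revset = set([2, 5])                   # changelog revs being sent
outgoing = [log.node(r) for r in log if log.linkrev(r) in revset]
assert outgoing == ["n1", "n2"]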

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
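
# Illustrative standalone sketch (hypothetical helper, not in the module):
# the return-value convention documented in addchangegroup() packs both "did
# anything change" and the head-count delta into one integer.  A caller
# could decode it like this:

def describe_result(ret):
    if ret == 0:
        return "nothing changed"
    if ret > 1:
        return "%d heads added" % (ret - 1)     # 2..n
    if ret < 0:
        return "%d heads removed" % (-ret - 1)  # -2..-n
    return "head count unchanged"               # ret == 1

assert describe_result(3) == "2 heads added"
assert describe_result(-2) == "1 heads removed"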


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
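
# Illustrative standalone sketch of the streaming-clone wire format that
# stream_in() consumes: a one-line integer status, then a "<files> <bytes>"
# summary line, then for each file a "<name>\0<size>\n" header followed by
# exactly <size> raw bytes of file data.  A toy parser under those
# assumptions:

import io

def parse_stream(fp):
    resp = int(fp.readline().decode('ascii'))
    if resp != 0:
        raise ValueError("server refused streaming clone: %d" % resp)
    summary = fp.readline().decode('ascii')
    total_files, total_bytes = map(int, summary.split(' ', 1))
    for _ in range(total_files):
        name, size = fp.readline().split(b'\0', 1)
        size = int(size.decode('ascii'))   # int() tolerates the trailing \n
        yield name, fp.read(size)          # next `size` bytes are content

# Example: two empty files advertised as "2 0".
data = io.BytesIO(b"0\n2 0\nfoo\x000\nbar\x000\n")
assert [n for n, _ in parse_stream(data)] == [b"foo", b"bar"]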

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
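
# Illustrative standalone sketch: aftertrans() snapshots the (src, dest)
# pairs into plain tuples and returns a closure over that copy, so the
# callback holds no reference back to the repository and cannot form a
# reference cycle.  A minimal equivalent using os.rename (the real helper
# uses util.rename, which handles platform quirks); the file names are
# hypothetical:

import os, tempfile

def aftertrans_sketch(files):
    renamefiles = [tuple(t) for t in files]   # snapshot, no repo reference
    def a():
        for src, dest in renamefiles:
            os.rename(src, dest)
    return a

d = tempfile.mkdtemp()
src = os.path.join(d, "journal")
open(src, "w").close()
undo = aftertrans_sketch([(src, os.path.join(d, "undo"))])
undo()   # performs the queued renames
assert os.path.exists(os.path.join(d, "undo"))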

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True