##// END OF EJS Templates
rawcommit: add removed files to the changelog file list...
Alexis S. L. Carvalho -
r3377:9fe62e2d default
parent child Browse files
Show More
@@ -1,1760 +1,1763 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ()
18 capabilities = ()
19
19
    def __del__(self):
        # Drop the transaction handle so a pending transaction object
        # does not outlive the repository instance.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        # Open (or, with create=1, initialize) the repository rooted at
        # path.  When path is None, search upward from the current
        # directory for a ".hg" control directory.
        repo.repository.__init__(self)
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(self.join("data"))
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)    # opens files under .hg/
        self.wopener = util.opener(self.root)   # opens working-dir files

        # per-repository configuration file is optional
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # determine the revlog format version and flags to use
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; reset by reload()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88
88
    def url(self):
        """Return the URL of this repository, using the 'file:' scheme."""
        return 'file:' + self.root
91
91
    def hook(self, name, throw=False, **args):
        '''run all configured hooks whose name (before any ".suffix")
        matches name.  hooks may be in-process python callables
        ("python:mod.func") or shell commands.  returns true if any
        hook reported failure; with throw=True a failure raises
        util.Abort instead.'''
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the remaining dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # run a shell hook; hook args are exported as HG_* variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # run matching hooks in sorted (configuration name) order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
172
172
    # characters that may not appear in a tag name (checked by tag())
    tag_disallowed = ':\r\n'
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags live in .hg/localtags and are never committed
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to proceed if .hgtags has uncommitted changes in any
        # of the first five status categories
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            # .hgtags is not yet tracked; add it before committing
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
215
215
216 def tags(self):
216 def tags(self):
217 '''return a mapping of tag to node'''
217 '''return a mapping of tag to node'''
218 if not self.tagscache:
218 if not self.tagscache:
219 self.tagscache = {}
219 self.tagscache = {}
220
220
221 def parsetag(line, context):
221 def parsetag(line, context):
222 if not line:
222 if not line:
223 return
223 return
224 s = l.split(" ", 1)
224 s = l.split(" ", 1)
225 if len(s) != 2:
225 if len(s) != 2:
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 return
227 return
228 node, key = s
228 node, key = s
229 key = key.strip()
229 key = key.strip()
230 try:
230 try:
231 bin_n = bin(node)
231 bin_n = bin(node)
232 except TypeError:
232 except TypeError:
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 (context, node))
234 (context, node))
235 return
235 return
236 if bin_n not in self.changelog.nodemap:
236 if bin_n not in self.changelog.nodemap:
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 (context, key))
238 (context, key))
239 return
239 return
240 self.tagscache[key] = bin_n
240 self.tagscache[key] = bin_n
241
241
242 # read the tags file from each head, ending with the tip,
242 # read the tags file from each head, ending with the tip,
243 # and add each tag found to the map, with "newer" ones
243 # and add each tag found to the map, with "newer" ones
244 # taking precedence
244 # taking precedence
245 heads = self.heads()
245 heads = self.heads()
246 heads.reverse()
246 heads.reverse()
247 fl = self.file(".hgtags")
247 fl = self.file(".hgtags")
248 for node in heads:
248 for node in heads:
249 change = self.changelog.read(node)
249 change = self.changelog.read(node)
250 rev = self.changelog.rev(node)
250 rev = self.changelog.rev(node)
251 fn, ff = self.manifest.find(change[0], '.hgtags')
251 fn, ff = self.manifest.find(change[0], '.hgtags')
252 if fn is None: continue
252 if fn is None: continue
253 count = 0
253 count = 0
254 for l in fl.read(fn).splitlines():
254 for l in fl.read(fn).splitlines():
255 count += 1
255 count += 1
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 (rev, short(node), count))
257 (rev, short(node), count))
258 try:
258 try:
259 f = self.opener("localtags")
259 f = self.opener("localtags")
260 count = 0
260 count = 0
261 for l in f:
261 for l in f:
262 count += 1
262 count += 1
263 parsetag(l, _("localtags, line %d") % count)
263 parsetag(l, _("localtags, line %d") % count)
264 except IOError:
264 except IOError:
265 pass
265 pass
266
266
267 self.tagscache['tip'] = self.changelog.tip()
267 self.tagscache['tip'] = self.changelog.tip()
268
268
269 return self.tagscache
269 return self.tagscache
270
270
271 def tagslist(self):
271 def tagslist(self):
272 '''return a list of tags ordered by revision'''
272 '''return a list of tags ordered by revision'''
273 l = []
273 l = []
274 for t, n in self.tags().items():
274 for t, n in self.tags().items():
275 try:
275 try:
276 r = self.changelog.rev(n)
276 r = self.changelog.rev(n)
277 except:
277 except:
278 r = -2 # sort to the beginning of the list if unknown
278 r = -2 # sort to the beginning of the list if unknown
279 l.append((r, t, n))
279 l.append((r, t, n))
280 l.sort()
280 l.sort()
281 return [(t, n) for r, t, n in l]
281 return [(t, n) for r, t, n in l]
282
282
283 def nodetags(self, node):
283 def nodetags(self, node):
284 '''return the tags associated with a node'''
284 '''return the tags associated with a node'''
285 if not self.nodetagscache:
285 if not self.nodetagscache:
286 self.nodetagscache = {}
286 self.nodetagscache = {}
287 for t, n in self.tags().items():
287 for t, n in self.tags().items():
288 self.nodetagscache.setdefault(n, []).append(t)
288 self.nodetagscache.setdefault(n, []).append(t)
289 return self.nodetagscache.get(node, [])
289 return self.nodetagscache.get(node, [])
290
290
    def lookup(self, key):
        """Resolve key (a tag name, '.', or anything the changelog can
        look up) to a binary node; raise repo.RepoError on failure."""
        try:
            return self.tags()[key]
        except KeyError:
            if key == '.':
                # '.' means the first parent of the working directory
                key = self.dirstate.parents()[0]
                if key == nullid:
                    raise repo.RepoError(_("no revision checked out"))
            try:
                return self.changelog.lookup(key)
            except:
                raise repo.RepoError(_("unknown revision '%s'") % key)
303
303
304 def dev(self):
304 def dev(self):
305 return os.lstat(self.path).st_dev
305 return os.lstat(self.path).st_dev
306
306
    def local(self):
        """True: this repository class gives direct local access."""
        return True
309
309
    def join(self, f):
        """Join f onto the .hg control directory path."""
        return os.path.join(self.path, f)
312
312
    def wjoin(self, f):
        """Join f onto the working directory root."""
        return os.path.join(self.root, f)
315
315
    def file(self, f):
        """Return the filelog for path f (a single leading '/' is stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f, self.revlogversion)
320
320
    def changectx(self, changeid=None):
        """Return a change context for changeid (delegates to context.changectx)."""
        return context.changectx(self, changeid)
323
323
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
326
326
327 def parents(self, changeid=None):
327 def parents(self, changeid=None):
328 '''
328 '''
329 get list of changectxs for parents of changeid or working directory
329 get list of changectxs for parents of changeid or working directory
330 '''
330 '''
331 if changeid is None:
331 if changeid is None:
332 pl = self.dirstate.parents()
332 pl = self.dirstate.parents()
333 else:
333 else:
334 n = self.changelog.lookup(changeid)
334 n = self.changelog.lookup(changeid)
335 pl = self.changelog.parents(n)
335 pl = self.changelog.parents(n)
336 if pl[1] == nullid:
336 if pl[1] == nullid:
337 return [self.changectx(pl[0])]
337 return [self.changectx(pl[0])]
338 return [self.changectx(pl[0]), self.changectx(pl[1])]
338 return [self.changectx(pl[0]), self.changectx(pl[1])]
339
339
    def filectx(self, path, changeid=None, fileid=None):
        """Return a file context for path.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
344
344
    def getcwd(self):
        """Return the dirstate's notion of the current working directory."""
        return self.dirstate.getcwd()
347
347
    def wfile(self, f, mode='r'):
        """Open file f from the working directory in the given mode."""
        return self.wopener(f, mode)
350
350
351 def wread(self, filename):
351 def wread(self, filename):
352 if self.encodepats == None:
352 if self.encodepats == None:
353 l = []
353 l = []
354 for pat, cmd in self.ui.configitems("encode"):
354 for pat, cmd in self.ui.configitems("encode"):
355 mf = util.matcher(self.root, "", [pat], [], [])[1]
355 mf = util.matcher(self.root, "", [pat], [], [])[1]
356 l.append((mf, cmd))
356 l.append((mf, cmd))
357 self.encodepats = l
357 self.encodepats = l
358
358
359 data = self.wopener(filename, 'r').read()
359 data = self.wopener(filename, 'r').read()
360
360
361 for mf, cmd in self.encodepats:
361 for mf, cmd in self.encodepats:
362 if mf(filename):
362 if mf(filename):
363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
364 data = util.filter(data, cmd)
364 data = util.filter(data, cmd)
365 break
365 break
366
366
367 return data
367 return data
368
368
369 def wwrite(self, filename, data, fd=None):
369 def wwrite(self, filename, data, fd=None):
370 if self.decodepats == None:
370 if self.decodepats == None:
371 l = []
371 l = []
372 for pat, cmd in self.ui.configitems("decode"):
372 for pat, cmd in self.ui.configitems("decode"):
373 mf = util.matcher(self.root, "", [pat], [], [])[1]
373 mf = util.matcher(self.root, "", [pat], [], [])[1]
374 l.append((mf, cmd))
374 l.append((mf, cmd))
375 self.decodepats = l
375 self.decodepats = l
376
376
377 for mf, cmd in self.decodepats:
377 for mf, cmd in self.decodepats:
378 if mf(filename):
378 if mf(filename):
379 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
379 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
380 data = util.filter(data, cmd)
380 data = util.filter(data, cmd)
381 break
381 break
382
382
383 if fd:
383 if fd:
384 return fd.write(data)
384 return fd.write(data)
385 return self.wopener(filename, 'w').write(data)
385 return self.wopener(filename, 'w').write(data)
386
386
    def transaction(self):
        """Return a transaction; nest inside a running one if present.

        Before opening a fresh transaction, the current dirstate is
        copied to journal.dirstate so rollback() can restore it."""
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate file yet; journal an empty one
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
404
404
405 def recover(self):
405 def recover(self):
406 l = self.lock()
406 l = self.lock()
407 if os.path.exists(self.join("journal")):
407 if os.path.exists(self.join("journal")):
408 self.ui.status(_("rolling back interrupted transaction\n"))
408 self.ui.status(_("rolling back interrupted transaction\n"))
409 transaction.rollback(self.opener, self.join("journal"))
409 transaction.rollback(self.opener, self.join("journal"))
410 self.reload()
410 self.reload()
411 return True
411 return True
412 else:
412 else:
413 self.ui.warn(_("no interrupted transaction available\n"))
413 self.ui.warn(_("no interrupted transaction available\n"))
414 return False
414 return False
415
415
416 def rollback(self, wlock=None):
416 def rollback(self, wlock=None):
417 if not wlock:
417 if not wlock:
418 wlock = self.wlock()
418 wlock = self.wlock()
419 l = self.lock()
419 l = self.lock()
420 if os.path.exists(self.join("undo")):
420 if os.path.exists(self.join("undo")):
421 self.ui.status(_("rolling back last transaction\n"))
421 self.ui.status(_("rolling back last transaction\n"))
422 transaction.rollback(self.opener, self.join("undo"))
422 transaction.rollback(self.opener, self.join("undo"))
423 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
423 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
424 self.reload()
424 self.reload()
425 self.wreload()
425 self.wreload()
426 else:
426 else:
427 self.ui.warn(_("no rollback information available\n"))
427 self.ui.warn(_("no rollback information available\n"))
428
428
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
431
431
    def reload(self):
        # re-read changelog and manifest, and invalidate the tag caches
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
437
437
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file under .hg.

        First tries a non-blocking acquire.  If the lock is held and
        wait is true, warns and retries with a timeout (ui.timeout
        config, default 600 seconds); otherwise re-raises LockHeld.
        Calls acquirefn after a successful acquire; returns the lock."""
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
454
454
    def lock(self, wait=1):
        """Lock the repository store; reloads cached state on acquire."""
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
458
458
    def wlock(self, wait=1):
        """Lock the working directory; writes the dirstate on release
        and re-reads it on acquire."""
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
463
463
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node for fn (or the existing parent
        node if the file is unchanged).  Appends fn to changelist when
        a new revision is actually added.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # record copy source and source revision in filelog metadata
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                # NOTE(review): no default here -- hex(None) would raise
                # if cp is missing from manifest2; confirm callers
                # guarantee its presence in this branch
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
503
503
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit an explicit list of files with the given metadata,
        without consulting the working-directory status.

        Parents default to the dirstate parents.  The dirstate is only
        updated when p1 is still the original working-directory parent."""
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])
        changed = []
        removed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        linkrev = self.changelog.count()
        for f in files:
            try:
                m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                # the file is gone from the working directory: drop it
                # from the manifest and record it as removed so it still
                # appears in the changelog's file list
                try:
                    del m1[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                    removed.append(f)
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed + removed, text,
                               tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
543
546
544 def commit(self, files=None, text="", user=None, date=None,
547 def commit(self, files=None, text="", user=None, date=None,
545 match=util.always, force=False, lock=None, wlock=None,
548 match=util.always, force=False, lock=None, wlock=None,
546 force_editor=False):
549 force_editor=False):
547 commit = []
550 commit = []
548 remove = []
551 remove = []
549 changed = []
552 changed = []
550
553
551 if files:
554 if files:
552 for f in files:
555 for f in files:
553 s = self.dirstate.state(f)
556 s = self.dirstate.state(f)
554 if s in 'nmai':
557 if s in 'nmai':
555 commit.append(f)
558 commit.append(f)
556 elif s == 'r':
559 elif s == 'r':
557 remove.append(f)
560 remove.append(f)
558 else:
561 else:
559 self.ui.warn(_("%s not tracked!\n") % f)
562 self.ui.warn(_("%s not tracked!\n") % f)
560 else:
563 else:
561 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
564 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
562 commit = modified + added
565 commit = modified + added
563 remove = removed
566 remove = removed
564
567
565 p1, p2 = self.dirstate.parents()
568 p1, p2 = self.dirstate.parents()
566 c1 = self.changelog.read(p1)
569 c1 = self.changelog.read(p1)
567 c2 = self.changelog.read(p2)
570 c2 = self.changelog.read(p2)
568 m1 = self.manifest.read(c1[0]).copy()
571 m1 = self.manifest.read(c1[0]).copy()
569 m2 = self.manifest.read(c2[0])
572 m2 = self.manifest.read(c2[0])
570
573
571 if not commit and not remove and not force and p2 == nullid:
574 if not commit and not remove and not force and p2 == nullid:
572 self.ui.status(_("nothing changed\n"))
575 self.ui.status(_("nothing changed\n"))
573 return None
576 return None
574
577
575 xp1 = hex(p1)
578 xp1 = hex(p1)
576 if p2 == nullid: xp2 = ''
579 if p2 == nullid: xp2 = ''
577 else: xp2 = hex(p2)
580 else: xp2 = hex(p2)
578
581
579 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
582 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
580
583
581 if not wlock:
584 if not wlock:
582 wlock = self.wlock()
585 wlock = self.wlock()
583 if not lock:
586 if not lock:
584 lock = self.lock()
587 lock = self.lock()
585 tr = self.transaction()
588 tr = self.transaction()
586
589
587 # check in files
590 # check in files
588 new = {}
591 new = {}
589 linkrev = self.changelog.count()
592 linkrev = self.changelog.count()
590 commit.sort()
593 commit.sort()
591 for f in commit:
594 for f in commit:
592 self.ui.note(f + "\n")
595 self.ui.note(f + "\n")
593 try:
596 try:
594 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
597 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
595 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
598 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
596 except IOError:
599 except IOError:
597 self.ui.warn(_("trouble committing %s!\n") % f)
600 self.ui.warn(_("trouble committing %s!\n") % f)
598 raise
601 raise
599
602
600 # update manifest
603 # update manifest
601 m1.update(new)
604 m1.update(new)
602 for f in remove:
605 for f in remove:
603 if f in m1:
606 if f in m1:
604 del m1[f]
607 del m1[f]
605 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
608 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
606
609
607 # add changeset
610 # add changeset
608 new = new.keys()
611 new = new.keys()
609 new.sort()
612 new.sort()
610
613
611 user = user or self.ui.username()
614 user = user or self.ui.username()
612 if not text or force_editor:
615 if not text or force_editor:
613 edittext = []
616 edittext = []
614 if text:
617 if text:
615 edittext.append(text)
618 edittext.append(text)
616 edittext.append("")
619 edittext.append("")
617 if p2 != nullid:
620 if p2 != nullid:
618 edittext.append("HG: branch merge")
621 edittext.append("HG: branch merge")
619 edittext.extend(["HG: changed %s" % f for f in changed])
622 edittext.extend(["HG: changed %s" % f for f in changed])
620 edittext.extend(["HG: removed %s" % f for f in remove])
623 edittext.extend(["HG: removed %s" % f for f in remove])
621 if not changed and not remove:
624 if not changed and not remove:
622 edittext.append("HG: no files changed")
625 edittext.append("HG: no files changed")
623 edittext.append("")
626 edittext.append("")
624 # run editor in the repository root
627 # run editor in the repository root
625 olddir = os.getcwd()
628 olddir = os.getcwd()
626 os.chdir(self.root)
629 os.chdir(self.root)
627 text = self.ui.edit("\n".join(edittext), user)
630 text = self.ui.edit("\n".join(edittext), user)
628 os.chdir(olddir)
631 os.chdir(olddir)
629
632
630 lines = [line.rstrip() for line in text.rstrip().splitlines()]
633 lines = [line.rstrip() for line in text.rstrip().splitlines()]
631 while lines and not lines[0]:
634 while lines and not lines[0]:
632 del lines[0]
635 del lines[0]
633 if not lines:
636 if not lines:
634 return None
637 return None
635 text = '\n'.join(lines)
638 text = '\n'.join(lines)
636 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
639 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
637 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
640 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
638 parent2=xp2)
641 parent2=xp2)
639 tr.close()
642 tr.close()
640
643
641 self.dirstate.setparents(n)
644 self.dirstate.setparents(n)
642 self.dirstate.update(new, "n")
645 self.dirstate.update(new, "n")
643 self.dirstate.forget(remove)
646 self.dirstate.forget(remove)
644
647
645 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
648 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
646 return n
649 return n
647
650
648 def walk(self, node=None, files=[], match=util.always, badmatch=None):
651 def walk(self, node=None, files=[], match=util.always, badmatch=None):
649 if node:
652 if node:
650 fdict = dict.fromkeys(files)
653 fdict = dict.fromkeys(files)
651 for fn in self.manifest.read(self.changelog.read(node)[0]):
654 for fn in self.manifest.read(self.changelog.read(node)[0]):
652 for ffn in fdict:
655 for ffn in fdict:
653 # match if the file is the exact name or a directory
656 # match if the file is the exact name or a directory
654 if ffn == fn or fn.startswith("%s/" % ffn):
657 if ffn == fn or fn.startswith("%s/" % ffn):
655 del fdict[ffn]
658 del fdict[ffn]
656 break
659 break
657 if match(fn):
660 if match(fn):
658 yield 'm', fn
661 yield 'm', fn
659 for fn in fdict:
662 for fn in fdict:
660 if badmatch and badmatch(fn):
663 if badmatch and badmatch(fn):
661 if match(fn):
664 if match(fn):
662 yield 'b', fn
665 yield 'b', fn
663 else:
666 else:
664 self.ui.warn(_('%s: No such file in rev %s\n') % (
667 self.ui.warn(_('%s: No such file in rev %s\n') % (
665 util.pathto(self.getcwd(), fn), short(node)))
668 util.pathto(self.getcwd(), fn), short(node)))
666 else:
669 else:
667 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
670 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
668 yield src, fn
671 yield src, fn
669
672
670 def status(self, node1=None, node2=None, files=[], match=util.always,
673 def status(self, node1=None, node2=None, files=[], match=util.always,
671 wlock=None, list_ignored=False, list_clean=False):
674 wlock=None, list_ignored=False, list_clean=False):
672 """return status of files between two nodes or node and working directory
675 """return status of files between two nodes or node and working directory
673
676
674 If node1 is None, use the first dirstate parent instead.
677 If node1 is None, use the first dirstate parent instead.
675 If node2 is None, compare node1 with working directory.
678 If node2 is None, compare node1 with working directory.
676 """
679 """
677
680
678 def fcmp(fn, mf):
681 def fcmp(fn, mf):
679 t1 = self.wread(fn)
682 t1 = self.wread(fn)
680 return self.file(fn).cmp(mf.get(fn, nullid), t1)
683 return self.file(fn).cmp(mf.get(fn, nullid), t1)
681
684
682 def mfmatches(node):
685 def mfmatches(node):
683 change = self.changelog.read(node)
686 change = self.changelog.read(node)
684 mf = self.manifest.read(change[0]).copy()
687 mf = self.manifest.read(change[0]).copy()
685 for fn in mf.keys():
688 for fn in mf.keys():
686 if not match(fn):
689 if not match(fn):
687 del mf[fn]
690 del mf[fn]
688 return mf
691 return mf
689
692
690 modified, added, removed, deleted, unknown = [], [], [], [], []
693 modified, added, removed, deleted, unknown = [], [], [], [], []
691 ignored, clean = [], []
694 ignored, clean = [], []
692
695
693 compareworking = False
696 compareworking = False
694 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
697 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
695 compareworking = True
698 compareworking = True
696
699
697 if not compareworking:
700 if not compareworking:
698 # read the manifest from node1 before the manifest from node2,
701 # read the manifest from node1 before the manifest from node2,
699 # so that we'll hit the manifest cache if we're going through
702 # so that we'll hit the manifest cache if we're going through
700 # all the revisions in parent->child order.
703 # all the revisions in parent->child order.
701 mf1 = mfmatches(node1)
704 mf1 = mfmatches(node1)
702
705
703 # are we comparing the working directory?
706 # are we comparing the working directory?
704 if not node2:
707 if not node2:
705 if not wlock:
708 if not wlock:
706 try:
709 try:
707 wlock = self.wlock(wait=0)
710 wlock = self.wlock(wait=0)
708 except lock.LockException:
711 except lock.LockException:
709 wlock = None
712 wlock = None
710 (lookup, modified, added, removed, deleted, unknown,
713 (lookup, modified, added, removed, deleted, unknown,
711 ignored, clean) = self.dirstate.status(files, match,
714 ignored, clean) = self.dirstate.status(files, match,
712 list_ignored, list_clean)
715 list_ignored, list_clean)
713
716
714 # are we comparing working dir against its parent?
717 # are we comparing working dir against its parent?
715 if compareworking:
718 if compareworking:
716 if lookup:
719 if lookup:
717 # do a full compare of any files that might have changed
720 # do a full compare of any files that might have changed
718 mf2 = mfmatches(self.dirstate.parents()[0])
721 mf2 = mfmatches(self.dirstate.parents()[0])
719 for f in lookup:
722 for f in lookup:
720 if fcmp(f, mf2):
723 if fcmp(f, mf2):
721 modified.append(f)
724 modified.append(f)
722 else:
725 else:
723 clean.append(f)
726 clean.append(f)
724 if wlock is not None:
727 if wlock is not None:
725 self.dirstate.update([f], "n")
728 self.dirstate.update([f], "n")
726 else:
729 else:
727 # we are comparing working dir against non-parent
730 # we are comparing working dir against non-parent
728 # generate a pseudo-manifest for the working dir
731 # generate a pseudo-manifest for the working dir
729 # XXX: create it in dirstate.py ?
732 # XXX: create it in dirstate.py ?
730 mf2 = mfmatches(self.dirstate.parents()[0])
733 mf2 = mfmatches(self.dirstate.parents()[0])
731 for f in lookup + modified + added:
734 for f in lookup + modified + added:
732 mf2[f] = ""
735 mf2[f] = ""
733 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
736 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
734 for f in removed:
737 for f in removed:
735 if f in mf2:
738 if f in mf2:
736 del mf2[f]
739 del mf2[f]
737 else:
740 else:
738 # we are comparing two revisions
741 # we are comparing two revisions
739 mf2 = mfmatches(node2)
742 mf2 = mfmatches(node2)
740
743
741 if not compareworking:
744 if not compareworking:
742 # flush lists from dirstate before comparing manifests
745 # flush lists from dirstate before comparing manifests
743 modified, added, clean = [], [], []
746 modified, added, clean = [], [], []
744
747
745 # make sure to sort the files so we talk to the disk in a
748 # make sure to sort the files so we talk to the disk in a
746 # reasonable order
749 # reasonable order
747 mf2keys = mf2.keys()
750 mf2keys = mf2.keys()
748 mf2keys.sort()
751 mf2keys.sort()
749 for fn in mf2keys:
752 for fn in mf2keys:
750 if mf1.has_key(fn):
753 if mf1.has_key(fn):
751 if mf1.flags(fn) != mf2.flags(fn) or \
754 if mf1.flags(fn) != mf2.flags(fn) or \
752 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
755 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
753 modified.append(fn)
756 modified.append(fn)
754 elif list_clean:
757 elif list_clean:
755 clean.append(fn)
758 clean.append(fn)
756 del mf1[fn]
759 del mf1[fn]
757 else:
760 else:
758 added.append(fn)
761 added.append(fn)
759
762
760 removed = mf1.keys()
763 removed = mf1.keys()
761
764
762 # sort and return results:
765 # sort and return results:
763 for l in modified, added, removed, deleted, unknown, ignored, clean:
766 for l in modified, added, removed, deleted, unknown, ignored, clean:
764 l.sort()
767 l.sort()
765 return (modified, added, removed, deleted, unknown, ignored, clean)
768 return (modified, added, removed, deleted, unknown, ignored, clean)
766
769
767 def add(self, list, wlock=None):
770 def add(self, list, wlock=None):
768 if not wlock:
771 if not wlock:
769 wlock = self.wlock()
772 wlock = self.wlock()
770 for f in list:
773 for f in list:
771 p = self.wjoin(f)
774 p = self.wjoin(f)
772 if not os.path.exists(p):
775 if not os.path.exists(p):
773 self.ui.warn(_("%s does not exist!\n") % f)
776 self.ui.warn(_("%s does not exist!\n") % f)
774 elif not os.path.isfile(p):
777 elif not os.path.isfile(p):
775 self.ui.warn(_("%s not added: only files supported currently\n")
778 self.ui.warn(_("%s not added: only files supported currently\n")
776 % f)
779 % f)
777 elif self.dirstate.state(f) in 'an':
780 elif self.dirstate.state(f) in 'an':
778 self.ui.warn(_("%s already tracked!\n") % f)
781 self.ui.warn(_("%s already tracked!\n") % f)
779 else:
782 else:
780 self.dirstate.update([f], "a")
783 self.dirstate.update([f], "a")
781
784
782 def forget(self, list, wlock=None):
785 def forget(self, list, wlock=None):
783 if not wlock:
786 if not wlock:
784 wlock = self.wlock()
787 wlock = self.wlock()
785 for f in list:
788 for f in list:
786 if self.dirstate.state(f) not in 'ai':
789 if self.dirstate.state(f) not in 'ai':
787 self.ui.warn(_("%s not added!\n") % f)
790 self.ui.warn(_("%s not added!\n") % f)
788 else:
791 else:
789 self.dirstate.forget([f])
792 self.dirstate.forget([f])
790
793
791 def remove(self, list, unlink=False, wlock=None):
794 def remove(self, list, unlink=False, wlock=None):
792 if unlink:
795 if unlink:
793 for f in list:
796 for f in list:
794 try:
797 try:
795 util.unlink(self.wjoin(f))
798 util.unlink(self.wjoin(f))
796 except OSError, inst:
799 except OSError, inst:
797 if inst.errno != errno.ENOENT:
800 if inst.errno != errno.ENOENT:
798 raise
801 raise
799 if not wlock:
802 if not wlock:
800 wlock = self.wlock()
803 wlock = self.wlock()
801 for f in list:
804 for f in list:
802 p = self.wjoin(f)
805 p = self.wjoin(f)
803 if os.path.exists(p):
806 if os.path.exists(p):
804 self.ui.warn(_("%s still exists!\n") % f)
807 self.ui.warn(_("%s still exists!\n") % f)
805 elif self.dirstate.state(f) == 'a':
808 elif self.dirstate.state(f) == 'a':
806 self.dirstate.forget([f])
809 self.dirstate.forget([f])
807 elif f not in self.dirstate:
810 elif f not in self.dirstate:
808 self.ui.warn(_("%s not tracked!\n") % f)
811 self.ui.warn(_("%s not tracked!\n") % f)
809 else:
812 else:
810 self.dirstate.update([f], "r")
813 self.dirstate.update([f], "r")
811
814
812 def undelete(self, list, wlock=None):
815 def undelete(self, list, wlock=None):
813 p = self.dirstate.parents()[0]
816 p = self.dirstate.parents()[0]
814 mn = self.changelog.read(p)[0]
817 mn = self.changelog.read(p)[0]
815 m = self.manifest.read(mn)
818 m = self.manifest.read(mn)
816 if not wlock:
819 if not wlock:
817 wlock = self.wlock()
820 wlock = self.wlock()
818 for f in list:
821 for f in list:
819 if self.dirstate.state(f) not in "r":
822 if self.dirstate.state(f) not in "r":
820 self.ui.warn("%s not removed!\n" % f)
823 self.ui.warn("%s not removed!\n" % f)
821 else:
824 else:
822 t = self.file(f).read(m[f])
825 t = self.file(f).read(m[f])
823 self.wwrite(f, t)
826 self.wwrite(f, t)
824 util.set_exec(self.wjoin(f), m.execf(f))
827 util.set_exec(self.wjoin(f), m.execf(f))
825 self.dirstate.update([f], "n")
828 self.dirstate.update([f], "n")
826
829
827 def copy(self, source, dest, wlock=None):
830 def copy(self, source, dest, wlock=None):
828 p = self.wjoin(dest)
831 p = self.wjoin(dest)
829 if not os.path.exists(p):
832 if not os.path.exists(p):
830 self.ui.warn(_("%s does not exist!\n") % dest)
833 self.ui.warn(_("%s does not exist!\n") % dest)
831 elif not os.path.isfile(p):
834 elif not os.path.isfile(p):
832 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
835 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
833 else:
836 else:
834 if not wlock:
837 if not wlock:
835 wlock = self.wlock()
838 wlock = self.wlock()
836 if self.dirstate.state(dest) == '?':
839 if self.dirstate.state(dest) == '?':
837 self.dirstate.update([dest], "a")
840 self.dirstate.update([dest], "a")
838 self.dirstate.copy(source, dest)
841 self.dirstate.copy(source, dest)
839
842
840 def heads(self, start=None):
843 def heads(self, start=None):
841 heads = self.changelog.heads(start)
844 heads = self.changelog.heads(start)
842 # sort the output in rev descending order
845 # sort the output in rev descending order
843 heads = [(-self.changelog.rev(h), h) for h in heads]
846 heads = [(-self.changelog.rev(h), h) for h in heads]
844 heads.sort()
847 heads.sort()
845 return [n for (r, n) in heads]
848 return [n for (r, n) in heads]
846
849
847 # branchlookup returns a dict giving a list of branches for
850 # branchlookup returns a dict giving a list of branches for
848 # each head. A branch is defined as the tag of a node or
851 # each head. A branch is defined as the tag of a node or
849 # the branch of the node's parents. If a node has multiple
852 # the branch of the node's parents. If a node has multiple
850 # branch tags, tags are eliminated if they are visible from other
853 # branch tags, tags are eliminated if they are visible from other
851 # branch tags.
854 # branch tags.
852 #
855 #
853 # So, for this graph: a->b->c->d->e
856 # So, for this graph: a->b->c->d->e
854 # \ /
857 # \ /
855 # aa -----/
858 # aa -----/
856 # a has tag 2.6.12
859 # a has tag 2.6.12
857 # d has tag 2.6.13
860 # d has tag 2.6.13
858 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
861 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
859 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
862 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
860 # from the list.
863 # from the list.
861 #
864 #
862 # It is possible that more than one head will have the same branch tag.
865 # It is possible that more than one head will have the same branch tag.
863 # callers need to check the result for multiple heads under the same
866 # callers need to check the result for multiple heads under the same
864 # branch tag if that is a problem for them (ie checkout of a specific
867 # branch tag if that is a problem for them (ie checkout of a specific
865 # branch).
868 # branch).
866 #
869 #
867 # passing in a specific branch will limit the depth of the search
870 # passing in a specific branch will limit the depth of the search
868 # through the parents. It won't limit the branches returned in the
871 # through the parents. It won't limit the branches returned in the
869 # result though.
872 # result though.
870 def branchlookup(self, heads=None, branch=None):
873 def branchlookup(self, heads=None, branch=None):
871 if not heads:
874 if not heads:
872 heads = self.heads()
875 heads = self.heads()
873 headt = [ h for h in heads ]
876 headt = [ h for h in heads ]
874 chlog = self.changelog
877 chlog = self.changelog
875 branches = {}
878 branches = {}
876 merges = []
879 merges = []
877 seenmerge = {}
880 seenmerge = {}
878
881
879 # traverse the tree once for each head, recording in the branches
882 # traverse the tree once for each head, recording in the branches
880 # dict which tags are visible from this head. The branches
883 # dict which tags are visible from this head. The branches
881 # dict also records which tags are visible from each tag
884 # dict also records which tags are visible from each tag
882 # while we traverse.
885 # while we traverse.
883 while headt or merges:
886 while headt or merges:
884 if merges:
887 if merges:
885 n, found = merges.pop()
888 n, found = merges.pop()
886 visit = [n]
889 visit = [n]
887 else:
890 else:
888 h = headt.pop()
891 h = headt.pop()
889 visit = [h]
892 visit = [h]
890 found = [h]
893 found = [h]
891 seen = {}
894 seen = {}
892 while visit:
895 while visit:
893 n = visit.pop()
896 n = visit.pop()
894 if n in seen:
897 if n in seen:
895 continue
898 continue
896 pp = chlog.parents(n)
899 pp = chlog.parents(n)
897 tags = self.nodetags(n)
900 tags = self.nodetags(n)
898 if tags:
901 if tags:
899 for x in tags:
902 for x in tags:
900 if x == 'tip':
903 if x == 'tip':
901 continue
904 continue
902 for f in found:
905 for f in found:
903 branches.setdefault(f, {})[n] = 1
906 branches.setdefault(f, {})[n] = 1
904 branches.setdefault(n, {})[n] = 1
907 branches.setdefault(n, {})[n] = 1
905 break
908 break
906 if n not in found:
909 if n not in found:
907 found.append(n)
910 found.append(n)
908 if branch in tags:
911 if branch in tags:
909 continue
912 continue
910 seen[n] = 1
913 seen[n] = 1
911 if pp[1] != nullid and n not in seenmerge:
914 if pp[1] != nullid and n not in seenmerge:
912 merges.append((pp[1], [x for x in found]))
915 merges.append((pp[1], [x for x in found]))
913 seenmerge[n] = 1
916 seenmerge[n] = 1
914 if pp[0] != nullid:
917 if pp[0] != nullid:
915 visit.append(pp[0])
918 visit.append(pp[0])
916 # traverse the branches dict, eliminating branch tags from each
919 # traverse the branches dict, eliminating branch tags from each
917 # head that are visible from another branch tag for that head.
920 # head that are visible from another branch tag for that head.
918 out = {}
921 out = {}
919 viscache = {}
922 viscache = {}
920 for h in heads:
923 for h in heads:
921 def visible(node):
924 def visible(node):
922 if node in viscache:
925 if node in viscache:
923 return viscache[node]
926 return viscache[node]
924 ret = {}
927 ret = {}
925 visit = [node]
928 visit = [node]
926 while visit:
929 while visit:
927 x = visit.pop()
930 x = visit.pop()
928 if x in viscache:
931 if x in viscache:
929 ret.update(viscache[x])
932 ret.update(viscache[x])
930 elif x not in ret:
933 elif x not in ret:
931 ret[x] = 1
934 ret[x] = 1
932 if x in branches:
935 if x in branches:
933 visit[len(visit):] = branches[x].keys()
936 visit[len(visit):] = branches[x].keys()
934 viscache[node] = ret
937 viscache[node] = ret
935 return ret
938 return ret
936 if h not in branches:
939 if h not in branches:
937 continue
940 continue
938 # O(n^2), but somewhat limited. This only searches the
941 # O(n^2), but somewhat limited. This only searches the
939 # tags visible from a specific head, not all the tags in the
942 # tags visible from a specific head, not all the tags in the
940 # whole repo.
943 # whole repo.
941 for b in branches[h]:
944 for b in branches[h]:
942 vis = False
945 vis = False
943 for bb in branches[h].keys():
946 for bb in branches[h].keys():
944 if b != bb:
947 if b != bb:
945 if b in visible(bb):
948 if b in visible(bb):
946 vis = True
949 vis = True
947 break
950 break
948 if not vis:
951 if not vis:
949 l = out.setdefault(h, [])
952 l = out.setdefault(h, [])
950 l[len(l):] = self.nodetags(b)
953 l[len(l):] = self.nodetags(b)
951 return out
954 return out
952
955
953 def branches(self, nodes):
956 def branches(self, nodes):
954 if not nodes:
957 if not nodes:
955 nodes = [self.changelog.tip()]
958 nodes = [self.changelog.tip()]
956 b = []
959 b = []
957 for n in nodes:
960 for n in nodes:
958 t = n
961 t = n
959 while 1:
962 while 1:
960 p = self.changelog.parents(n)
963 p = self.changelog.parents(n)
961 if p[1] != nullid or p[0] == nullid:
964 if p[1] != nullid or p[0] == nullid:
962 b.append((t, n, p[0], p[1]))
965 b.append((t, n, p[0], p[1]))
963 break
966 break
964 n = p[0]
967 n = p[0]
965 return b
968 return b
966
969
967 def between(self, pairs):
970 def between(self, pairs):
968 r = []
971 r = []
969
972
970 for top, bottom in pairs:
973 for top, bottom in pairs:
971 n, l, i = top, [], 0
974 n, l, i = top, [], 0
972 f = 1
975 f = 1
973
976
974 while n != bottom:
977 while n != bottom:
975 p = self.changelog.parents(n)[0]
978 p = self.changelog.parents(n)[0]
976 if i == f:
979 if i == f:
977 l.append(n)
980 l.append(n)
978 f = f * 2
981 f = f * 2
979 n = p
982 n = p
980 i += 1
983 i += 1
981
984
982 r.append(l)
985 r.append(l)
983
986
984 return r
987 return r
985
988
986 def findincoming(self, remote, base=None, heads=None, force=False):
989 def findincoming(self, remote, base=None, heads=None, force=False):
987 """Return list of roots of the subsets of missing nodes from remote
990 """Return list of roots of the subsets of missing nodes from remote
988
991
989 If base dict is specified, assume that these nodes and their parents
992 If base dict is specified, assume that these nodes and their parents
990 exist on the remote side and that no child of a node of base exists
993 exist on the remote side and that no child of a node of base exists
991 in both remote and self.
994 in both remote and self.
992 Furthermore base will be updated to include the nodes that exists
995 Furthermore base will be updated to include the nodes that exists
993 in self and remote but no children exists in self and remote.
996 in self and remote but no children exists in self and remote.
994 If a list of heads is specified, return only nodes which are heads
997 If a list of heads is specified, return only nodes which are heads
995 or ancestors of these heads.
998 or ancestors of these heads.
996
999
997 All the ancestors of base are in self and in remote.
1000 All the ancestors of base are in self and in remote.
998 All the descendants of the list returned are missing in self.
1001 All the descendants of the list returned are missing in self.
999 (and so we know that the rest of the nodes are missing in remote, see
1002 (and so we know that the rest of the nodes are missing in remote, see
1000 outgoing)
1003 outgoing)
1001 """
1004 """
1002 m = self.changelog.nodemap
1005 m = self.changelog.nodemap
1003 search = []
1006 search = []
1004 fetch = {}
1007 fetch = {}
1005 seen = {}
1008 seen = {}
1006 seenbranch = {}
1009 seenbranch = {}
1007 if base == None:
1010 if base == None:
1008 base = {}
1011 base = {}
1009
1012
1010 if not heads:
1013 if not heads:
1011 heads = remote.heads()
1014 heads = remote.heads()
1012
1015
1013 if self.changelog.tip() == nullid:
1016 if self.changelog.tip() == nullid:
1014 base[nullid] = 1
1017 base[nullid] = 1
1015 if heads != [nullid]:
1018 if heads != [nullid]:
1016 return [nullid]
1019 return [nullid]
1017 return []
1020 return []
1018
1021
1019 # assume we're closer to the tip than the root
1022 # assume we're closer to the tip than the root
1020 # and start by examining the heads
1023 # and start by examining the heads
1021 self.ui.status(_("searching for changes\n"))
1024 self.ui.status(_("searching for changes\n"))
1022
1025
1023 unknown = []
1026 unknown = []
1024 for h in heads:
1027 for h in heads:
1025 if h not in m:
1028 if h not in m:
1026 unknown.append(h)
1029 unknown.append(h)
1027 else:
1030 else:
1028 base[h] = 1
1031 base[h] = 1
1029
1032
1030 if not unknown:
1033 if not unknown:
1031 return []
1034 return []
1032
1035
1033 req = dict.fromkeys(unknown)
1036 req = dict.fromkeys(unknown)
1034 reqcnt = 0
1037 reqcnt = 0
1035
1038
1036 # search through remote branches
1039 # search through remote branches
1037 # a 'branch' here is a linear segment of history, with four parts:
1040 # a 'branch' here is a linear segment of history, with four parts:
1038 # head, root, first parent, second parent
1041 # head, root, first parent, second parent
1039 # (a branch always has two parents (or none) by definition)
1042 # (a branch always has two parents (or none) by definition)
1040 unknown = remote.branches(unknown)
1043 unknown = remote.branches(unknown)
1041 while unknown:
1044 while unknown:
1042 r = []
1045 r = []
1043 while unknown:
1046 while unknown:
1044 n = unknown.pop(0)
1047 n = unknown.pop(0)
1045 if n[0] in seen:
1048 if n[0] in seen:
1046 continue
1049 continue
1047
1050
1048 self.ui.debug(_("examining %s:%s\n")
1051 self.ui.debug(_("examining %s:%s\n")
1049 % (short(n[0]), short(n[1])))
1052 % (short(n[0]), short(n[1])))
1050 if n[0] == nullid: # found the end of the branch
1053 if n[0] == nullid: # found the end of the branch
1051 pass
1054 pass
1052 elif n in seenbranch:
1055 elif n in seenbranch:
1053 self.ui.debug(_("branch already found\n"))
1056 self.ui.debug(_("branch already found\n"))
1054 continue
1057 continue
1055 elif n[1] and n[1] in m: # do we know the base?
1058 elif n[1] and n[1] in m: # do we know the base?
1056 self.ui.debug(_("found incomplete branch %s:%s\n")
1059 self.ui.debug(_("found incomplete branch %s:%s\n")
1057 % (short(n[0]), short(n[1])))
1060 % (short(n[0]), short(n[1])))
1058 search.append(n) # schedule branch range for scanning
1061 search.append(n) # schedule branch range for scanning
1059 seenbranch[n] = 1
1062 seenbranch[n] = 1
1060 else:
1063 else:
1061 if n[1] not in seen and n[1] not in fetch:
1064 if n[1] not in seen and n[1] not in fetch:
1062 if n[2] in m and n[3] in m:
1065 if n[2] in m and n[3] in m:
1063 self.ui.debug(_("found new changeset %s\n") %
1066 self.ui.debug(_("found new changeset %s\n") %
1064 short(n[1]))
1067 short(n[1]))
1065 fetch[n[1]] = 1 # earliest unknown
1068 fetch[n[1]] = 1 # earliest unknown
1066 for p in n[2:4]:
1069 for p in n[2:4]:
1067 if p in m:
1070 if p in m:
1068 base[p] = 1 # latest known
1071 base[p] = 1 # latest known
1069
1072
1070 for p in n[2:4]:
1073 for p in n[2:4]:
1071 if p not in req and p not in m:
1074 if p not in req and p not in m:
1072 r.append(p)
1075 r.append(p)
1073 req[p] = 1
1076 req[p] = 1
1074 seen[n[0]] = 1
1077 seen[n[0]] = 1
1075
1078
1076 if r:
1079 if r:
1077 reqcnt += 1
1080 reqcnt += 1
1078 self.ui.debug(_("request %d: %s\n") %
1081 self.ui.debug(_("request %d: %s\n") %
1079 (reqcnt, " ".join(map(short, r))))
1082 (reqcnt, " ".join(map(short, r))))
1080 for p in range(0, len(r), 10):
1083 for p in range(0, len(r), 10):
1081 for b in remote.branches(r[p:p+10]):
1084 for b in remote.branches(r[p:p+10]):
1082 self.ui.debug(_("received %s:%s\n") %
1085 self.ui.debug(_("received %s:%s\n") %
1083 (short(b[0]), short(b[1])))
1086 (short(b[0]), short(b[1])))
1084 unknown.append(b)
1087 unknown.append(b)
1085
1088
1086 # do binary search on the branches we found
1089 # do binary search on the branches we found
1087 while search:
1090 while search:
1088 n = search.pop(0)
1091 n = search.pop(0)
1089 reqcnt += 1
1092 reqcnt += 1
1090 l = remote.between([(n[0], n[1])])[0]
1093 l = remote.between([(n[0], n[1])])[0]
1091 l.append(n[1])
1094 l.append(n[1])
1092 p = n[0]
1095 p = n[0]
1093 f = 1
1096 f = 1
1094 for i in l:
1097 for i in l:
1095 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1098 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1096 if i in m:
1099 if i in m:
1097 if f <= 2:
1100 if f <= 2:
1098 self.ui.debug(_("found new branch changeset %s\n") %
1101 self.ui.debug(_("found new branch changeset %s\n") %
1099 short(p))
1102 short(p))
1100 fetch[p] = 1
1103 fetch[p] = 1
1101 base[i] = 1
1104 base[i] = 1
1102 else:
1105 else:
1103 self.ui.debug(_("narrowed branch search to %s:%s\n")
1106 self.ui.debug(_("narrowed branch search to %s:%s\n")
1104 % (short(p), short(i)))
1107 % (short(p), short(i)))
1105 search.append((p, i))
1108 search.append((p, i))
1106 break
1109 break
1107 p, f = i, f * 2
1110 p, f = i, f * 2
1108
1111
1109 # sanity check our fetch list
1112 # sanity check our fetch list
1110 for f in fetch.keys():
1113 for f in fetch.keys():
1111 if f in m:
1114 if f in m:
1112 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1115 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1113
1116
1114 if base.keys() == [nullid]:
1117 if base.keys() == [nullid]:
1115 if force:
1118 if force:
1116 self.ui.warn(_("warning: repository is unrelated\n"))
1119 self.ui.warn(_("warning: repository is unrelated\n"))
1117 else:
1120 else:
1118 raise util.Abort(_("repository is unrelated"))
1121 raise util.Abort(_("repository is unrelated"))
1119
1122
1120 self.ui.debug(_("found new changesets starting at ") +
1123 self.ui.debug(_("found new changesets starting at ") +
1121 " ".join([short(f) for f in fetch]) + "\n")
1124 " ".join([short(f) for f in fetch]) + "\n")
1122
1125
1123 self.ui.debug(_("%d total queries\n") % reqcnt)
1126 self.ui.debug(_("%d total queries\n") % reqcnt)
1124
1127
1125 return fetch.keys()
1128 return fetch.keys()
1126
1129
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    # No known common base supplied: probe the remote to discover one.
    if base == None:
        base = {}
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    # Start from everything we have locally...
    local_only = dict.fromkeys(self.changelog.nodemap)
    del local_only[nullid]

    # ...and prune away the bases plus all of their ancestors, since the
    # remote side is assumed to have those already.
    pending = base.keys()
    while pending:
        node = pending.pop(0)
        if node in local_only:
            del local_only[node]
            pending.extend(self.changelog.parents(node))

    # Whatever survives pruning is missing remotely; the roots of that
    # set are the nodes whose parents were both pruned.
    subset = []
    updated_heads = {}
    for node in local_only:
        p1, p2 = self.changelog.parents(node)
        if p1 not in local_only and p2 not in local_only:
            subset.append(node)
            if heads:
                # Remember which remote heads will grow new children.
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    return subset
1174
1177
def pull(self, remote, heads=None, force=False, lock=None):
    """Pull a changegroup of missing changesets from remote and add it.

    If no lock is passed in, take (and later release) the repo lock
    ourselves; a caller-supplied lock is left for the caller to release.
    """
    locked_here = not lock
    if locked_here:
        lock = self.lock()

    try:
        fetch = self.findincoming(remote, force=force)
        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0
        if fetch == [nullid]:
            # Remote shares no history with us: full clone-style pull.
            self.ui.status(_("requesting all changes\n"))

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        if locked_here:
            lock.release()
1198
1201
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to remote, picking the right transport.

    There are two ways to push to a remote repo:

    - addchangegroup assumes the local user can lock the remote repo
      (local filesystem, old ssh servers);
    - unbundle assumes the local user cannot lock the remote repo
      (new ssh servers, http servers).
    """
    pusher = self.push_addchangegroup
    if remote.capable('unbundle'):
        pusher = self.push_unbundle
    return pusher(remote, force, revs)
1211
1214
def prepush(self, remote, force, revs):
    """Compute the changegroup to send for a push.

    Returns (changegroup, remote_heads) when there is something to
    push, or (None, 1) when the push is empty or refused.
    """
    base = {}
    remote_heads = remote.heads()
    inc = self.findincoming(remote, base, remote_heads, force=force)

    # Refuse to push over remote changes we have not seen, unless forced.
    if inc and not force:
        self.ui.warn(_("abort: unsynced remote changes!\n"))
        self.ui.status(_("(did you forget to sync?"
                         " use push -f to force)\n"))
        return None, 1

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is None:
        bases, heads = update, self.changelog.heads()
    else:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1

    if not force:
        # FIXME we don't properly detect creation of new heads
        # in the push -r case, assume the user knows what he's doing
        if (not revs and len(remote_heads) < len(heads)
            and remote_heads != [nullid]):
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1246
1249
def push_addchangegroup(self, remote, force, revs):
    """Push by locking the remote repo and adding the changegroup directly."""
    # NOTE(review): the lock is held via this local reference and never
    # explicitly released; presumably it is dropped when the object is
    # garbage-collected at function exit — confirm against the lock class.
    lock = remote.lock()

    cg, remote_heads = self.prepush(remote, force, revs)
    if cg is None:
        # Nothing to push (or push refused); second element is the status.
        return remote_heads
    return remote.addchangegroup(cg, 'push', self.url())
1255
1258
def push_unbundle(self, remote, force, revs):
    """Push via the unbundle protocol (server locks itself).

    local repo finds heads on server, finds out what revs it
    must push.  once revs transferred, if server finds it has
    different heads (someone else won commit/push race), server
    aborts.
    """
    cg, remote_heads = self.prepush(remote, force, revs)
    if cg is None:
        # Nothing to push (or push refused); second element is the status.
        return remote_heads
    if force:
        # Tell the server to skip its race-detection head check.
        remote_heads = ['force']
    return remote.unbundle(cg, remote_heads, 'push')
1268
1271
def changegroupsubset(self, bases, heads, source):
    """This function generates a changegroup consisting of all the nodes
    that are descendents of any of the bases, and ancestors of any of
    the heads.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # msng is short for missing - compute the list of changesets in this
    # changegroup.  nodesbetween also minimizes the bases/heads sets.
    msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

    # Known heads are the heads the recipient of this changegroup is
    # assumed to already have: all parents of the (minimal) bases.
    knownheads = {}
    for n in bases:
        for p in cl.parents(n):
            if p != nullid:
                knownheads[p] = 1
    knownheads = knownheads.keys()
    if knownheads:
        # The recipient must know every changeset reachable from the
        # known heads down to the null changeset.
        has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
        junk = None
        # Turn the list into an ersatz set for O(1) membership tests.
        has_cl_set = dict.fromkeys(has_cl_set)
    else:
        # No known heads: assume the recipient knows nothing.
        has_cl_set = {}

    mnfst = self.manifest
    # Which manifest nodes are missing (maps node -> owning changenode).
    msng_mnfst_set = {}
    # Which filenodes are missing (maps fname -> {filenode: changenode}).
    msng_filenode_set = {}

    junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
    junk = None

    # A changeset always belongs to itself, so the changenode lookup
    # function for a changenode is identity.
    def identity(x):
        return x

    # Build a comparator ordering nodes by revision number in the given
    # revlog: both the most efficient read order and a topological sort.
    def cmp_by_rev_func(revlog):
        def cmp_by_rev(a, b):
            return cmp(revlog.rev(a), revlog.rev(b))
        return cmp_by_rev

    # If the recipient must already have a node, it also has all the
    # node's ancestors; remove all of them from the missing set.
    def prune_parents(revlog, hasset, msngset):
        haslst = hasset.keys()
        haslst.sort(cmp_by_rev_func(revlog))
        for node in haslst:
            parentlst = [p for p in revlog.parents(node) if p != nullid]
            while parentlst:
                n = parentlst.pop()
                if n not in hasset:
                    hasset[n] = 1
                    p = [p for p in revlog.parents(n) if p != nullid]
                    parentlst.extend(p)
        for n in hasset:
            msngset.pop(n, None)

    # Callback factory for the changelog group generator: records, for
    # each outgoing changeset, which files changed and which manifest
    # node it references (remembering the FIRST referencing changenode
    # as the manifest's owner).
    def manifest_and_file_collector(changedfileset):
        def collect_manifests_and_files(clnode):
            c = cl.read(clnode)
            for f in c[3]:
                # This is to make sure we only have one instance of each
                # filename string for each filename.
                changedfileset.setdefault(f, f)
            msng_mnfst_set.setdefault(c[0], clnode)
        return collect_manifests_and_files

    # Drop from msng_mnfst_set every manifest node the recipient must
    # already have (its linked changeset is in has_cl_set), plus the
    # ancestors of those nodes.
    def prune_manifests():
        has_mnfst_set = {}
        for n in msng_mnfst_set:
            linknode = cl.node(mnfst.linkrev(n))
            if linknode in has_cl_set:
                has_mnfst_set[n] = 1
        prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

    # Map a manifest node back to the changenode it belongs to, using
    # the ownership recorded by collect_manifests_and_files.
    def lookup_manifest_link(mnfstnode):
        return msng_mnfst_set[mnfstnode]

    # Callback factory for the manifest group generator: gathers the
    # filenodes each outgoing manifest references, and remembers which
    # changenode each filenode belongs to (that of the first manifest
    # referencing it).
    def filenode_collector(changedfiles):
        next_rev = [0]
        def collect_msng_filenodes(mnfstnode):
            r = mnfst.rev(mnfstnode)
            if r == next_rev[0]:
                # Consecutive with the previous manifest we examined:
                # a delta is enough, no need to read the full manifest.
                delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                for dline in delta.splitlines():
                    f, fnode = dline.split('\0')
                    fnode = bin(fnode[:40])
                    f = changedfiles.get(f, None)
                    # Only files changed somewhere in the changegroup
                    # are interesting.
                    if f is not None:
                        clnode = msng_mnfst_set[mnfstnode]
                        ndset = msng_filenode_set.setdefault(f, {})
                        # First manifest referencing the filenode wins.
                        ndset.setdefault(fnode, clnode)
            else:
                # Otherwise we need a full manifest.
                m = mnfst.read(mnfstnode)
                for f in changedfiles:
                    fnode = m.get(f, None)
                    if fnode is not None:
                        # See comments above.
                        clnode = msng_mnfst_set[mnfstnode]
                        ndset = msng_filenode_set.setdefault(f, {})
                        ndset.setdefault(fnode, clnode)
            # Remember the revision we hope to see next.
            next_rev[0] = r + 1
        return collect_msng_filenodes

    # Drop from a file's missing set every filenode the recipient must
    # already have, plus the ancestors of those nodes.
    def prune_filenodes(f, filerevlog):
        msngset = msng_filenode_set[f]
        hasset = {}
        for n in msngset:
            clnode = cl.node(filerevlog.linkrev(n))
            if clnode in has_cl_set:
                hasset[n] = 1
        prune_parents(filerevlog, hasset, msngset)

    # Closure mapping a filenode of fname back to its owning changenode.
    def lookup_filenode_link_func(fname):
        msngset = msng_filenode_set[fname]
        def lookup_filenode_link(fnode):
            return msngset[fnode]
        return lookup_filenode_link

    # With all the helpers in place, generate the group itself:
    # changesets first, then manifests, then per-file filenode groups.
    def gengroup():
        changedfiles = {}
        # The changelog group generator calls our collector back to
        # record changed files and candidate manifest nodes.
        group = cl.group(msng_cl_lst, identity,
                         manifest_and_file_collector(changedfiles))
        for chnk in group:
            yield chnk

        # Manifest list is now populated; prune and emit it in
        # revision order.
        prune_manifests()
        msng_mnfst_lst = msng_mnfst_set.keys()
        msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
        group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                            filenode_collector(changedfiles))
        for chnk in group:
            yield chnk

        # No longer needed; free the memory.
        msng_mnfst_lst = None
        msng_mnfst_set.clear()

        changedfiles = changedfiles.keys()
        changedfiles.sort()
        # Emit one group per changed file, sorted by name.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            # Toss out the filenodes the recipient isn't really missing.
            if msng_filenode_set.has_key(fname):
                prune_filenodes(fname, filerevlog)
                msng_filenode_lst = msng_filenode_set[fname].keys()
            else:
                msng_filenode_lst = []
            # Only emit a group if any filenodes survived pruning.
            if len(msng_filenode_lst) > 0:
                yield changegroup.genchunk(fname)
                msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                # Only a changenode-lookup callback is needed here; no
                # further information is collected from filenodes.
                group = filerevlog.group(msng_filenode_lst,
                                         lookup_filenode_link_func(fname))
                for chnk in group:
                    yield chnk
            if msng_filenode_set.has_key(fname):
                # Don't need this anymore, toss it to free memory.
                del msng_filenode_set[fname]
        # Signal that no more groups are left.
        yield changegroup.closechunk()

    if msng_cl_lst:
        self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

    return util.chunkbuffer(gengroup())
1539
1542
1540 def changegroup(self, basenodes, source):
1543 def changegroup(self, basenodes, source):
1541 """Generate a changegroup of all nodes that we have that a recipient
1544 """Generate a changegroup of all nodes that we have that a recipient
1542 doesn't.
1545 doesn't.
1543
1546
1544 This is much easier than the previous function as we can assume that
1547 This is much easier than the previous function as we can assume that
1545 the recipient has any changenode we aren't sending them."""
1548 the recipient has any changenode we aren't sending them."""
1546
1549
1547 self.hook('preoutgoing', throw=True, source=source)
1550 self.hook('preoutgoing', throw=True, source=source)
1548
1551
1549 cl = self.changelog
1552 cl = self.changelog
1550 nodes = cl.nodesbetween(basenodes, None)[0]
1553 nodes = cl.nodesbetween(basenodes, None)[0]
1551 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1554 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1552
1555
1553 def identity(x):
1556 def identity(x):
1554 return x
1557 return x
1555
1558
1556 def gennodelst(revlog):
1559 def gennodelst(revlog):
1557 for r in xrange(0, revlog.count()):
1560 for r in xrange(0, revlog.count()):
1558 n = revlog.node(r)
1561 n = revlog.node(r)
1559 if revlog.linkrev(n) in revset:
1562 if revlog.linkrev(n) in revset:
1560 yield n
1563 yield n
1561
1564
1562 def changed_file_collector(changedfileset):
1565 def changed_file_collector(changedfileset):
1563 def collect_changed_files(clnode):
1566 def collect_changed_files(clnode):
1564 c = cl.read(clnode)
1567 c = cl.read(clnode)
1565 for fname in c[3]:
1568 for fname in c[3]:
1566 changedfileset[fname] = 1
1569 changedfileset[fname] = 1
1567 return collect_changed_files
1570 return collect_changed_files
1568
1571
1569 def lookuprevlink_func(revlog):
1572 def lookuprevlink_func(revlog):
1570 def lookuprevlink(n):
1573 def lookuprevlink(n):
1571 return cl.node(revlog.linkrev(n))
1574 return cl.node(revlog.linkrev(n))
1572 return lookuprevlink
1575 return lookuprevlink
1573
1576
1574 def gengroup():
1577 def gengroup():
1575 # construct a list of all changed files
1578 # construct a list of all changed files
1576 changedfiles = {}
1579 changedfiles = {}
1577
1580
1578 for chnk in cl.group(nodes, identity,
1581 for chnk in cl.group(nodes, identity,
1579 changed_file_collector(changedfiles)):
1582 changed_file_collector(changedfiles)):
1580 yield chnk
1583 yield chnk
1581 changedfiles = changedfiles.keys()
1584 changedfiles = changedfiles.keys()
1582 changedfiles.sort()
1585 changedfiles.sort()
1583
1586
1584 mnfst = self.manifest
1587 mnfst = self.manifest
1585 nodeiter = gennodelst(mnfst)
1588 nodeiter = gennodelst(mnfst)
1586 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1589 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1587 yield chnk
1590 yield chnk
1588
1591
1589 for fname in changedfiles:
1592 for fname in changedfiles:
1590 filerevlog = self.file(fname)
1593 filerevlog = self.file(fname)
1591 nodeiter = gennodelst(filerevlog)
1594 nodeiter = gennodelst(filerevlog)
1592 nodeiter = list(nodeiter)
1595 nodeiter = list(nodeiter)
1593 if nodeiter:
1596 if nodeiter:
1594 yield changegroup.genchunk(fname)
1597 yield changegroup.genchunk(fname)
1595 lookup = lookuprevlink_func(filerevlog)
1598 lookup = lookuprevlink_func(filerevlog)
1596 for chnk in filerevlog.group(nodeiter, lookup):
1599 for chnk in filerevlog.group(nodeiter, lookup):
1597 yield chnk
1600 yield chnk
1598
1601
1599 yield changegroup.closechunk()
1602 yield changegroup.closechunk()
1600
1603
1601 if nodes:
1604 if nodes:
1602 self.hook('outgoing', node=hex(nodes[0]), source=source)
1605 self.hook('outgoing', node=hex(nodes[0]), source=source)
1603
1606
1604 return util.chunkbuffer(gengroup())
1607 return util.chunkbuffer(gengroup())
1605
1608
1606 def addchangegroup(self, source, srctype, url):
1609 def addchangegroup(self, source, srctype, url):
1607 """add changegroup to repo.
1610 """add changegroup to repo.
1608 returns number of heads modified or added + 1."""
1611 returns number of heads modified or added + 1."""
1609
1612
1610 def csmap(x):
1613 def csmap(x):
1611 self.ui.debug(_("add changeset %s\n") % short(x))
1614 self.ui.debug(_("add changeset %s\n") % short(x))
1612 return cl.count()
1615 return cl.count()
1613
1616
1614 def revmap(x):
1617 def revmap(x):
1615 return cl.rev(x)
1618 return cl.rev(x)
1616
1619
1617 if not source:
1620 if not source:
1618 return 0
1621 return 0
1619
1622
1620 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1623 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1621
1624
1622 changesets = files = revisions = 0
1625 changesets = files = revisions = 0
1623
1626
1624 tr = self.transaction()
1627 tr = self.transaction()
1625
1628
1626 # write changelog data to temp files so concurrent readers will not see
1629 # write changelog data to temp files so concurrent readers will not see
1627 # inconsistent view
1630 # inconsistent view
1628 cl = None
1631 cl = None
1629 try:
1632 try:
1630 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1633 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1631
1634
1632 oldheads = len(cl.heads())
1635 oldheads = len(cl.heads())
1633
1636
1634 # pull off the changeset group
1637 # pull off the changeset group
1635 self.ui.status(_("adding changesets\n"))
1638 self.ui.status(_("adding changesets\n"))
1636 cor = cl.count() - 1
1639 cor = cl.count() - 1
1637 chunkiter = changegroup.chunkiter(source)
1640 chunkiter = changegroup.chunkiter(source)
1638 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1641 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1639 raise util.Abort(_("received changelog group is empty"))
1642 raise util.Abort(_("received changelog group is empty"))
1640 cnr = cl.count() - 1
1643 cnr = cl.count() - 1
1641 changesets = cnr - cor
1644 changesets = cnr - cor
1642
1645
1643 # pull off the manifest group
1646 # pull off the manifest group
1644 self.ui.status(_("adding manifests\n"))
1647 self.ui.status(_("adding manifests\n"))
1645 chunkiter = changegroup.chunkiter(source)
1648 chunkiter = changegroup.chunkiter(source)
1646 # no need to check for empty manifest group here:
1649 # no need to check for empty manifest group here:
1647 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1650 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1648 # no new manifest will be created and the manifest group will
1651 # no new manifest will be created and the manifest group will
1649 # be empty during the pull
1652 # be empty during the pull
1650 self.manifest.addgroup(chunkiter, revmap, tr)
1653 self.manifest.addgroup(chunkiter, revmap, tr)
1651
1654
1652 # process the files
1655 # process the files
1653 self.ui.status(_("adding file changes\n"))
1656 self.ui.status(_("adding file changes\n"))
1654 while 1:
1657 while 1:
1655 f = changegroup.getchunk(source)
1658 f = changegroup.getchunk(source)
1656 if not f:
1659 if not f:
1657 break
1660 break
1658 self.ui.debug(_("adding %s revisions\n") % f)
1661 self.ui.debug(_("adding %s revisions\n") % f)
1659 fl = self.file(f)
1662 fl = self.file(f)
1660 o = fl.count()
1663 o = fl.count()
1661 chunkiter = changegroup.chunkiter(source)
1664 chunkiter = changegroup.chunkiter(source)
1662 if fl.addgroup(chunkiter, revmap, tr) is None:
1665 if fl.addgroup(chunkiter, revmap, tr) is None:
1663 raise util.Abort(_("received file revlog group is empty"))
1666 raise util.Abort(_("received file revlog group is empty"))
1664 revisions += fl.count() - o
1667 revisions += fl.count() - o
1665 files += 1
1668 files += 1
1666
1669
1667 cl.writedata()
1670 cl.writedata()
1668 finally:
1671 finally:
1669 if cl:
1672 if cl:
1670 cl.cleanup()
1673 cl.cleanup()
1671
1674
1672 # make changelog see real files again
1675 # make changelog see real files again
1673 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1676 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1674 self.changelog.checkinlinesize(tr)
1677 self.changelog.checkinlinesize(tr)
1675
1678
1676 newheads = len(self.changelog.heads())
1679 newheads = len(self.changelog.heads())
1677 heads = ""
1680 heads = ""
1678 if oldheads and newheads != oldheads:
1681 if oldheads and newheads != oldheads:
1679 heads = _(" (%+d heads)") % (newheads - oldheads)
1682 heads = _(" (%+d heads)") % (newheads - oldheads)
1680
1683
1681 self.ui.status(_("added %d changesets"
1684 self.ui.status(_("added %d changesets"
1682 " with %d changes to %d files%s\n")
1685 " with %d changes to %d files%s\n")
1683 % (changesets, revisions, files, heads))
1686 % (changesets, revisions, files, heads))
1684
1687
1685 if changesets > 0:
1688 if changesets > 0:
1686 self.hook('pretxnchangegroup', throw=True,
1689 self.hook('pretxnchangegroup', throw=True,
1687 node=hex(self.changelog.node(cor+1)), source=srctype,
1690 node=hex(self.changelog.node(cor+1)), source=srctype,
1688 url=url)
1691 url=url)
1689
1692
1690 tr.close()
1693 tr.close()
1691
1694
1692 if changesets > 0:
1695 if changesets > 0:
1693 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1696 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1694 source=srctype, url=url)
1697 source=srctype, url=url)
1695
1698
1696 for i in range(cor + 1, cnr + 1):
1699 for i in range(cor + 1, cnr + 1):
1697 self.hook("incoming", node=hex(self.changelog.node(i)),
1700 self.hook("incoming", node=hex(self.changelog.node(i)),
1698 source=srctype, url=url)
1701 source=srctype, url=url)
1699
1702
1700 return newheads - oldheads + 1
1703 return newheads - oldheads + 1
1701
1704
1702
1705
1703 def stream_in(self, remote):
1706 def stream_in(self, remote):
1704 fp = remote.stream_out()
1707 fp = remote.stream_out()
1705 resp = int(fp.readline())
1708 resp = int(fp.readline())
1706 if resp != 0:
1709 if resp != 0:
1707 raise util.Abort(_('operation forbidden by server'))
1710 raise util.Abort(_('operation forbidden by server'))
1708 self.ui.status(_('streaming all changes\n'))
1711 self.ui.status(_('streaming all changes\n'))
1709 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1712 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1710 self.ui.status(_('%d files to transfer, %s of data\n') %
1713 self.ui.status(_('%d files to transfer, %s of data\n') %
1711 (total_files, util.bytecount(total_bytes)))
1714 (total_files, util.bytecount(total_bytes)))
1712 start = time.time()
1715 start = time.time()
1713 for i in xrange(total_files):
1716 for i in xrange(total_files):
1714 name, size = fp.readline().split('\0', 1)
1717 name, size = fp.readline().split('\0', 1)
1715 size = int(size)
1718 size = int(size)
1716 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1719 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1717 ofp = self.opener(name, 'w')
1720 ofp = self.opener(name, 'w')
1718 for chunk in util.filechunkiter(fp, limit=size):
1721 for chunk in util.filechunkiter(fp, limit=size):
1719 ofp.write(chunk)
1722 ofp.write(chunk)
1720 ofp.close()
1723 ofp.close()
1721 elapsed = time.time() - start
1724 elapsed = time.time() - start
1722 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1725 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1723 (util.bytecount(total_bytes), elapsed,
1726 (util.bytecount(total_bytes), elapsed,
1724 util.bytecount(total_bytes / elapsed)))
1727 util.bytecount(total_bytes / elapsed)))
1725 self.reload()
1728 self.reload()
1726 return len(self.heads()) + 1
1729 return len(self.heads()) + 1
1727
1730
1728 def clone(self, remote, heads=[], stream=False):
1731 def clone(self, remote, heads=[], stream=False):
1729 '''clone remote repository.
1732 '''clone remote repository.
1730
1733
1731 keyword arguments:
1734 keyword arguments:
1732 heads: list of revs to clone (forces use of pull)
1735 heads: list of revs to clone (forces use of pull)
1733 stream: use streaming clone if possible'''
1736 stream: use streaming clone if possible'''
1734
1737
1735 # now, all clients that can request uncompressed clones can
1738 # now, all clients that can request uncompressed clones can
1736 # read repo formats supported by all servers that can serve
1739 # read repo formats supported by all servers that can serve
1737 # them.
1740 # them.
1738
1741
1739 # if revlog format changes, client will have to check version
1742 # if revlog format changes, client will have to check version
1740 # and format flags on "stream" capability, and use
1743 # and format flags on "stream" capability, and use
1741 # uncompressed only if compatible.
1744 # uncompressed only if compatible.
1742
1745
1743 if stream and not heads and remote.capable('stream'):
1746 if stream and not heads and remote.capable('stream'):
1744 return self.stream_in(remote)
1747 return self.stream_in(remote)
1745 return self.pull(remote, heads)
1748 return self.pull(remote, heads)
1746
1749
1747 # used to avoid circular references so destructors work
1750 # used to avoid circular references so destructors work
1748 def aftertrans(base):
1751 def aftertrans(base):
1749 p = base
1752 p = base
1750 def a():
1753 def a():
1751 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1754 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1752 util.rename(os.path.join(p, "journal.dirstate"),
1755 util.rename(os.path.join(p, "journal.dirstate"),
1753 os.path.join(p, "undo.dirstate"))
1756 os.path.join(p, "undo.dirstate"))
1754 return a
1757 return a
1755
1758
1756 def instance(ui, path, create):
1759 def instance(ui, path, create):
1757 return localrepository(ui, util.drop_scheme('file', path), create)
1760 return localrepository(ui, util.drop_scheme('file', path), create)
1758
1761
1759 def islocal(path):
1762 def islocal(path):
1760 return True
1763 return True
@@ -1,33 +1,33 b''
1 #!/bin/sh
1 #!/bin/sh
2 hg --debug init
2 hg --debug init
3 echo this is a1 > a
3 echo this is a1 > a
4 hg add a
4 hg add a
5 hg commit -m0 -d "1000000 0"
5 hg commit -m0 -d "1000000 0"
6 echo this is b1 > b
6 echo this is b1 > b
7 hg add b
7 hg add b
8 hg commit -m1 -d "1000000 0"
8 hg commit -m1 -d "1000000 0"
9 hg manifest 1
9 hg manifest 1
10 echo this is c1 > c
10 echo this is c1 > c
11 hg rawcommit -p 1 -d "1000000 0" -m2 c
11 hg rawcommit -p 1 -d "1000000 0" -m2 c
12 hg manifest 2
12 hg manifest 2
13 hg parents
13 hg -v parents
14 rm b
14 rm b
15 hg rawcommit -p 2 -d "1000000 0" -m3 b
15 hg rawcommit -p 2 -d "1000000 0" -m3 b
16 hg manifest 3
16 hg manifest 3
17 hg parents
17 hg -v parents
18 echo this is a22 > a
18 echo this is a22 > a
19 hg rawcommit -p 3 -d "1000000 0" -m4 a
19 hg rawcommit -p 3 -d "1000000 0" -m4 a
20 hg manifest 4
20 hg manifest 4
21 hg parents
21 hg -v parents
22 echo this is c22 > c
22 echo this is c22 > c
23 hg rawcommit -p 1 -d "1000000 0" -m5 c
23 hg rawcommit -p 1 -d "1000000 0" -m5 c
24 hg manifest 5
24 hg manifest 5
25 hg parents
25 hg -v parents
26 # merge, but no files changed
26 # merge, but no files changed
27 hg rawcommit -p 4 -p 5 -d "1000000 0" -m6
27 hg rawcommit -p 4 -p 5 -d "1000000 0" -m6
28 hg manifest 6
28 hg manifest 6
29 hg parents
29 hg -v parents
30 # no changes what-so-ever
30 # no changes what-so-ever
31 hg rawcommit -p 6 -d "1000000 0" -m7
31 hg rawcommit -p 6 -d "1000000 0" -m7
32 hg manifest 7
32 hg manifest 7
33 hg parents
33 hg -v parents
@@ -1,59 +1,77 b''
1 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
1 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
2 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
2 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
3 (the rawcommit command is deprecated)
3 (the rawcommit command is deprecated)
4 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
4 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
5 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
5 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
6 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
6 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
7 changeset: 2:e110db3db549
7 changeset: 2:e110db3db549
8 tag: tip
8 tag: tip
9 user: test
9 user: test
10 date: Mon Jan 12 13:46:40 1970 +0000
10 date: Mon Jan 12 13:46:40 1970 +0000
11 summary: 2
11 files: c
12 description:
13 2
14
12
15
13 (the rawcommit command is deprecated)
16 (the rawcommit command is deprecated)
14 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
17 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
15 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
18 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
16 changeset: 3:0f9843914735
19 changeset: 3:20652cf30cc0
17 tag: tip
20 tag: tip
18 user: test
21 user: test
19 date: Mon Jan 12 13:46:40 1970 +0000
22 date: Mon Jan 12 13:46:40 1970 +0000
20 summary: 3
23 files: b
24 description:
25 3
26
21
27
22 (the rawcommit command is deprecated)
28 (the rawcommit command is deprecated)
23 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
29 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
24 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
30 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
25 changeset: 4:909a3d1d3ee1
31 changeset: 4:42556b925639
26 tag: tip
32 tag: tip
27 user: test
33 user: test
28 date: Mon Jan 12 13:46:40 1970 +0000
34 date: Mon Jan 12 13:46:40 1970 +0000
29 summary: 4
35 files: a
36 description:
37 4
38
30
39
31 (the rawcommit command is deprecated)
40 (the rawcommit command is deprecated)
32 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
41 05f9e54f4c9b86b09099803d8b49a50edcb4eaab 644 a
33 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
42 54837d97f2932a8194e69745a280a2c11e61ff9c 644 b
34 3570202ceac2b52517df64ebd0a062cb0d8fe33a 644 c
43 3570202ceac2b52517df64ebd0a062cb0d8fe33a 644 c
35 changeset: 4:909a3d1d3ee1
44 changeset: 4:42556b925639
36 user: test
45 user: test
37 date: Mon Jan 12 13:46:40 1970 +0000
46 date: Mon Jan 12 13:46:40 1970 +0000
38 summary: 4
47 files: a
48 description:
49 4
50
39
51
40 (the rawcommit command is deprecated)
52 (the rawcommit command is deprecated)
41 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
53 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
42 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
54 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
43 changeset: 6:725fdd0728db
55 changeset: 6:8a0c9254b0ab
44 tag: tip
56 tag: tip
45 parent: 4:909a3d1d3ee1
57 parent: 4:42556b925639
46 parent: 5:f56d4c64ab98
58 parent: 5:f56d4c64ab98
47 user: test
59 user: test
48 date: Mon Jan 12 13:46:40 1970 +0000
60 date: Mon Jan 12 13:46:40 1970 +0000
49 summary: 6
61 files:
62 description:
63 6
64
50
65
51 (the rawcommit command is deprecated)
66 (the rawcommit command is deprecated)
52 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
67 d6e3c4976c13feb1728cd3ac851abaf7256a5c23 644 a
53 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
68 76d5e637cbec1bcc04a5a3fa4bcc7d13f6847c00 644 c
54 changeset: 7:2c11b55105cb
69 changeset: 7:a5a6e1f312b9
55 tag: tip
70 tag: tip
56 user: test
71 user: test
57 date: Mon Jan 12 13:46:40 1970 +0000
72 date: Mon Jan 12 13:46:40 1970 +0000
58 summary: 7
73 files:
74 description:
75 7
59
76
77
General Comments 0
You need to be logged in to leave comments. Login now