
Searched refs:mlog (Results 1 – 25 of 45) sorted by relevance

/linux-6.1.9/fs/ocfs2/dlm/
dlmrecovery.c
105 mlog(0, "%s: changing dead_node from %u to %u\n", in dlm_set_reco_dead_node()
114 mlog(0, "%s: changing new_master from %u to %u\n", in dlm_set_reco_master()
144 mlog(0, "%s: work thread has %d work items\n", dlm->name, tot); in dlm_dispatch_work()
181 mlog(0, "starting dlm recovery thread...\n"); in dlm_launch_recovery_thread()
197 mlog(0, "waiting for dlm recovery thread to exit\n"); in dlm_complete_recovery_thread()
233 mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n", in dlm_print_reco_node_status()
266 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n", in dlm_print_reco_node_status()
270 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n", in dlm_print_reco_node_status()
283 mlog(0, "dlm thread running for %s...\n", dlm->name); in dlm_recovery_thread()
301 mlog(0, "quitting DLM recovery thread\n"); in dlm_recovery_thread()
[all …]
dlmmaster.c
222 mlog(ML_ERROR, "bad mle: %p\n", mle); in __dlm_put_mle()
365 mlog(0, "node %u already removed from nodemap!\n", idx); in dlm_mle_node_down()
379 mlog(0, "node %u already in node map!\n", idx); in dlm_mle_node_up()
414 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname, in dlm_mle_release()
476 mlog(0, "destroying lockres %.*s\n", res->lockname.len, in dlm_lockres_release()
488 mlog(ML_ERROR, in dlm_lockres_release()
606 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, in dlm_lockres_set_refmap_bit()
617 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, in dlm_lockres_clear_refmap_bit()
628 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, in __dlm_lockres_grab_inflight_ref()
649 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, in dlm_lockres_drop_inflight_ref()
[all …]
dlmunlock.c
94 mlog(0, "master_node = %d, valblk = %d\n", master_node, in dlmunlock_common()
108 mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock " in dlmunlock_common()
117 mlog(ML_ERROR, "lockres in progress!\n"); in dlmunlock_common()
184 mlog(0, "%s:%.*s: clearing actions, %s\n", in dlmunlock_common()
216 mlog(0, "clearing convert_type at %smaster node\n", in dlmunlock_common()
249 mlog(0, "lock %u:%llu should be gone now! refs=%d\n", in dlmunlock_common()
321 mlog(0, "%.*s\n", res->lockname.len, res->lockname.name); in dlm_send_remote_unlock_request()
327 mlog(0, "%s:%.*s: this node became the master due to a " in dlm_send_remote_unlock_request()
355 mlog(0, "master was in-progress. retry\n"); in dlm_send_remote_unlock_request()
358 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " in dlm_send_remote_unlock_request()
[all …]
dlmdomain.c
82 mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n", in dlm_alloc_pagevec()
150 mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len, in __dlm_unhash_lockres()
169 mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len, in __dlm_insert_lockres()
181 mlog(0, "%.*s\n", len, name); in __dlm_lookup_lockres_full()
213 mlog(0, "%.*s\n", len, name); in __dlm_lookup_lockres()
321 mlog(0, "freeing memory from domain %s\n", dlm->name); in dlm_ctxt_release()
411 mlog(0, "Migrating locks from domain %s\n", dlm->name); in dlm_migrate_all_locks()
449 mlog(0, "%s: perhaps there are more lock resources " in dlm_migrate_all_locks()
453 mlog(0, "%s: we won't do dlm recovery after migrating " in dlm_migrate_all_locks()
465 mlog(0, "%s: %d lock resources in hash last pass\n", in dlm_migrate_all_locks()
[all …]
dlmast.c
86 mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, " in __dlm_queue_ast()
95 mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n", in __dlm_queue_ast()
106 mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n", in __dlm_queue_ast()
153 mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n", in __dlm_queue_bast()
177 mlog(0, "getting lvb from lockres for %s node\n", in dlm_update_lvb()
201 mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name, in dlm_do_local_ast()
221 mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name, in dlm_do_remote_ast()
245 mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n", in dlm_do_local_bast()
287 mlog(ML_ERROR, "Invalid name length (%d) in proxy ast " in dlm_proxy_ast_handler()
294 mlog(ML_ERROR, "Both PUT and GET lvb specified, (0x%x)\n", in dlm_proxy_ast_handler()
[all …]
dlmconvert.c
113 mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n", in __dlmconvert_master()
120 mlog(ML_ERROR, "attempted to convert a lock with a lock " in __dlmconvert_master()
128 mlog(ML_ERROR, "attempted to convert a lock not on grant " in __dlmconvert_master()
138 mlog(0, "will set lvb: converting %s->%s\n", in __dlmconvert_master()
147 mlog(0, "will fetch new value into " in __dlmconvert_master()
153 mlog(0, "will NOT fetch new value " in __dlmconvert_master()
188 mlog(0, "res %.*s, granting %s lock\n", res->lockname.len, in __dlmconvert_master()
193 mlog(0, "doing in-place convert for nonlocal lock\n"); in __dlmconvert_master()
210 mlog(0, "failed to convert NOQUEUE lock %.*s from " in __dlmconvert_master()
216 mlog(0, "res %.*s, queueing...\n", res->lockname.len, in __dlmconvert_master()
[all …]
dlmlock.c
108 mlog(0, "type=%d\n", lock->ml.type); in dlmlock_master()
125 mlog(0, "I can grant this lock right away\n"); in dlmlock_master()
142 mlog(0, "%s: returning DLM_NORMAL to " in dlmlock_master()
153 mlog(0, "%s: returning NOTQUEUED to " in dlmlock_master()
204 mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n", in dlmlock_remote()
240 mlog(0, "%s: recovery lock was owned by " in dlmlock_remote()
261 mlog(0, "%s: $RECOVERY lock for this node (%u) is " in dlmlock_remote()
304 mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer " in dlm_send_remote_lock_request()
312 mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to " in dlm_send_remote_lock_request()
349 mlog(0, "freeing kernel-allocated lksb\n"); in dlm_lock_release()
[all …]
dlmthread.c
114 mlog(0, "%s: Adding res %.*s to purge list\n", in __dlm_lockres_calc_usage()
123 mlog(0, "%s: Removing res %.*s from purge list\n", in __dlm_lockres_calc_usage()
157 mlog(0, "%s: Removing res %.*s from purgelist\n", in __dlm_do_purge_lockres()
165 mlog(ML_ERROR, "%s: res %.*s in use after deref\n", in __dlm_do_purge_lockres()
177 mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n", in __dlm_do_purge_lockres()
201 mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name, in dlm_purge_lockres()
206 mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n", in dlm_purge_lockres()
233 mlog(0, "%s: Removing res %.*s from purgelist, master %d\n", in dlm_purge_lockres()
241 mlog(0, "%s: deref %.*s in progress\n", in dlm_purge_lockres()
248 mlog(ML_ERROR, "%s: res %.*s in use after deref\n", in dlm_purge_lockres()
[all …]
/linux-6.1.9/fs/ocfs2/cluster/
quorum.c
102 mlog(0, "heartbeating: %d, connected: %d, " in o2quo_make_decision()
115 mlog(ML_ERROR, "fencing this node because it is " in o2quo_make_decision()
129 mlog(ML_ERROR, "fencing this node because it is " in o2quo_make_decision()
138 mlog(ML_ERROR, "fencing this node because it is " in o2quo_make_decision()
152 mlog(ML_NOTICE, "not fencing this node, heartbeating: %d, " in o2quo_make_decision()
170 mlog(0, "node %u, %d total\n", node, qs->qs_holds); in o2quo_set_hold()
179 mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1); in o2quo_clear_hold()
207 mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating); in o2quo_hb_up()
232 mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating); in o2quo_hb_down()
250 mlog(0, "node %u\n", node); in o2quo_hb_still_up()
[all …]
heartbeat.c
288 mlog(ML_ERROR, "Heartbeat write timeout to device %pg after %u " in o2hb_write_timeout()
302 mlog(ML_HEARTBEAT, "Number of regions %d, failed regions %d\n", in o2hb_write_timeout()
322 mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", in o2hb_arm_timeout()
411 mlog(ML_HEARTBEAT, "send NEGO_APPROVE msg to node %d\n", i); in o2hb_nego_timeout()
415 mlog(ML_ERROR, "send NEGO_APPROVE msg to node %d fail %d\n", in o2hb_nego_timeout()
426 mlog(ML_ERROR, "send NEGO_TIMEOUT msg to node %d fail %d\n", in o2hb_nego_timeout()
443 mlog(ML_ERROR, "got nego timeout message from bad node.\n"); in o2hb_nego_timeout_handler()
491 mlog(ML_ERROR, "IO Error %d\n", bio->bi_status); in o2hb_bio_end_io()
520 mlog(ML_ERROR, "Could not alloc slots BIO!\n"); in o2hb_setup_one_bio()
538 mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n", in o2hb_setup_one_bio()
[all …]
masklog.h
157 #define mlog(mask, fmt, ...) \ macro
171 mlog(mask, fmt, ##__VA_ARGS__); \
179 mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
185 mlog(ML_ERROR, "bug expression: " #cond "\n"); \
186 mlog(ML_ERROR, fmt, ##args); \
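Every call site in these results goes through the mlog(mask, fmt, ...) macro defined at masklog.h line 157 above. The following is a minimal stand-alone sketch of that calling convention; the ML_* mask values, the enabled_masks filter, and the printed prefix are illustrative assumptions standing in for the kernel's masklog machinery, and only the (mask, fmt, ...) shape and the ##__VA_ARGS__ forwarding are taken from the lines shown here.

#include <stdio.h>

/* Illustrative mask bits; the real ML_* flags are defined in masklog.h. */
#define ML_ERROR  0x00000001UL
#define ML_NOTICE 0x00000002UL

/* Hypothetical stand-in for the kernel's runtime mask filter. */
static unsigned long enabled_masks = ML_ERROR | ML_NOTICE;

/* Mask-filtered printf-style logger with the same call shape as mlog().
 * Treating mask 0 as always-on is an assumption made for this sketch. */
#define mlog(mask, fmt, ...)						\
	do {								\
		if ((mask) == 0 || ((mask) & enabled_masks))		\
			printf("(%s:%d) " fmt, __func__, __LINE__,	\
			       ##__VA_ARGS__);				\
	} while (0)

int main(void)
{
	unsigned int dead_node = 3, new_node = 5;

	/* Mirrors the call shapes seen in dlmrecovery.c and masklog.h above. */
	mlog(0, "changing dead_node from %u to %u\n", dead_node, new_node);
	mlog(ML_ERROR, "status = %lld\n", (long long)-22);
	return 0;
}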
tcp.c
73 mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
83 mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
360 mlog(0, "completed %d messages for node %u\n", num_kills, in o2net_complete_nodes_nsw()
510 mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n", in o2net_set_nn_state()
557 mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay); in o2net_set_nn_state()
806 mlog(0, "max_len for message handler out of range: %u\n", in o2net_register_handler()
813 mlog(0, "no message type provided: %u, %p\n", msg_type, func); in o2net_register_handler()
819 mlog(0, "no message handler provided: %u, %p\n", in o2net_register_handler()
850 mlog(ML_TCP, "registered handler func %p type %u key %08x\n", in o2net_register_handler()
874 mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n", in o2net_unregister_handler_list()
[all …]
/linux-6.1.9/fs/ocfs2/dlmfs/
userdlm.c
92 mlog(ML_ERROR, "Dlm error %d while calling %s on " \
116 mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n", in user_ast()
124 mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n", in user_ast()
203 mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n", in user_bast()
221 mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n", in user_unlock_ast()
225 mlog(ML_ERROR, "dlm returns status %d\n", status); in user_unlock_ast()
291 mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name); in user_dlm_unblock_lock()
309 mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n", in user_dlm_unblock_lock()
316 mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n", in user_dlm_unblock_lock()
324 mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n", in user_dlm_unblock_lock()
[all …]
dlmfs.c
126 mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino, in dlmfs_file_open()
173 mlog(0, "close called on inode %lu\n", inode->i_ino); in dlmfs_file_release()
245 mlog(0, "inode %lu, count = %zu, *ppos = %llu\n", in dlmfs_file_write()
264 mlog(0, "wrote %zu bytes\n", count); in dlmfs_file_write()
304 mlog(0, "inode %lu\n", inode->i_ino); in dlmfs_evict_inode()
322 mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn); in dlmfs_evict_inode()
416 mlog(0, "mkdir %.*s\n", domain->len, domain->name); in dlmfs_mkdir()
421 mlog(ML_ERROR, "invalid domain name for directory.\n"); in dlmfs_mkdir()
437 mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n", in dlmfs_mkdir()
464 mlog(0, "create %.*s\n", name->len, name->name); in dlmfs_create()
[all …]
/linux-6.1.9/fs/ocfs2/
super.c
462 mlog(ML_ERROR, "Unable to load system inode %d, " in ocfs2_init_global_system_inodes()
491 mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n", in ocfs2_init_local_system_inodes()
623 mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n"); in ocfs2_remount()
630 mlog(ML_ERROR, "Cannot change data mode on remount\n"); in ocfs2_remount()
639 mlog(ML_ERROR, "Cannot enable inode64 on remount\n"); in ocfs2_remount()
655 mlog(ML_ERROR, "Remount on readonly device is forbidden.\n"); in ocfs2_remount()
665 mlog(ML_ERROR, "Cannot remount RDWR " in ocfs2_remount()
672 mlog(ML_ERROR, "Cannot remount RDWR because " in ocfs2_remount()
736 mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n", in ocfs2_sb_probe()
754 mlog(ML_ERROR, "incompatible version: %u.%u\n", in ocfs2_sb_probe()
[all …]
resize.c
305 mlog(ML_ERROR, "The disk is too old and small. " in ocfs2_group_extend()
385 mlog(ML_ERROR, "Group descriptor # %llu has bad chain %u " in ocfs2_check_new_group()
390 mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but " in ocfs2_check_new_group()
395 mlog(ML_ERROR, "Group descriptor # %llu has free bit count %u " in ocfs2_check_new_group()
420 mlog(ML_ERROR, "add a group which is in the current volume.\n"); in ocfs2_verify_group_and_input()
422 mlog(ML_ERROR, "input chain exceeds the limit.\n"); in ocfs2_verify_group_and_input()
424 mlog(ML_ERROR, in ocfs2_verify_group_and_input()
427 mlog(ML_ERROR, "add group's clusters overflow.\n"); in ocfs2_verify_group_and_input()
429 mlog(ML_ERROR, "the cluster exceeds the maximum of a group\n"); in ocfs2_verify_group_and_input()
431 mlog(ML_ERROR, "the free cluster exceeds the total clusters\n"); in ocfs2_verify_group_and_input()
[all …]
dlmglue.c
118 mlog(level, "LVB information for %s (called from %s:%u):\n", in ocfs2_dump_meta_lvb_info()
120 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n", in ocfs2_dump_meta_lvb_info()
123 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n", in ocfs2_dump_meta_lvb_info()
127 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, " in ocfs2_dump_meta_lvb_info()
359 mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
362 mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
401 mlog(0, "built lock resource with name: %s\n", name); in ocfs2_build_lock_name()
409 mlog(0, "Add tracking for lockres %s\n", res->l_name); in ocfs2_add_lockres_tracking()
997 mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n", in ocfs2_generic_handle_bast()
1003 mlog(0, "needs_downconvert = %d\n", needs_downconvert); in ocfs2_generic_handle_bast()
[all …]
reservations.c
80 mlog(ML_NOTICE, "Dumping resmap for device %s. Bitmap length: %u\n", in ocfs2_dump_resv()
87 mlog(ML_NOTICE, "start: %u\tend: %u\tlen: %u\tlast_start: %u" in ocfs2_dump_resv()
96 mlog(ML_NOTICE, "%d reservations found. LRU follows\n", i); in ocfs2_dump_resv()
100 mlog(ML_NOTICE, "LRU(%d) start: %u\tend: %u\tlen: %u\t" in ocfs2_dump_resv()
120 mlog(ML_ERROR, in ocfs2_validate_resmap_bits()
143 mlog(ML_ERROR, "reservation %d has bad start off!\n", in ocfs2_check_resmap()
149 mlog(ML_ERROR, "reservation %d has no length!\n", in ocfs2_check_resmap()
155 mlog(ML_ERROR, "reservation %d has invalid range!\n", in ocfs2_check_resmap()
161 mlog(ML_ERROR, "reservation %d extends past bitmap!\n", in ocfs2_check_resmap()
328 mlog(ML_ERROR, "Duplicate reservation window!\n"); in ocfs2_resv_insert()
[all …]
blockcheck.c
294 mlog(ML_NOTICE, "Block check count has wrapped\n"); in ocfs2_blockcheck_inc_check()
310 mlog(ML_NOTICE, "Checksum failure count has wrapped\n"); in ocfs2_blockcheck_inc_failure()
326 mlog(ML_NOTICE, "ECC recovery count has wrapped\n"); in ocfs2_blockcheck_inc_recover()
400 mlog(ML_ERROR, in ocfs2_block_check_validate()
415 mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n", in ocfs2_block_check_validate()
513 mlog(ML_ERROR, in ocfs2_block_check_validate_bhs()
546 mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", in ocfs2_block_check_validate_bhs()
journal.c
553 mlog(ML_ERROR, in ocfs2_abort_trigger()
653 mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n"); in __ocfs2_journal_access()
654 mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n", in __ocfs2_journal_access()
697 mlog(ML_ERROR, "Unknown access type!\n"); in __ocfs2_journal_access()
704 mlog(ML_ERROR, "Error %d getting %d access to buffer!\n", in __ocfs2_journal_access()
784 mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. " in ocfs2_journal_dirty()
824 mlog(ML_ERROR, "unable to alloc journal\n"); in ocfs2_journal_alloc()
864 mlog(ML_ERROR, "access error (bad inode)\n"); in ocfs2_journal_init()
880 mlog(ML_ERROR, "Could not get lock on journal!\n"); in ocfs2_journal_init()
888 mlog(ML_ERROR, "Journal file size (%lld) is too small!\n", in ocfs2_journal_init()
[all …]
export.c
64 mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status); in ocfs2_get_dentry()
78 mlog(ML_ERROR, "test inode bit failed %d\n", status); in ocfs2_get_dentry()
142 mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status); in ocfs2_get_parent()
166 mlog(ML_ERROR, "test inode bit failed %d\n", status); in ocfs2_get_parent()
inode.c
313 mlog(ML_ERROR, in ocfs2_populate_inode()
852 mlog(ML_ERROR, "Skipping delete of root inode.\n"); in ocfs2_inode_is_valid_to_delete()
871 mlog(ML_ERROR, "Skipping delete of system file %llu\n", in ocfs2_inode_is_valid_to_delete()
932 mlog(ML_ERROR, in ocfs2_query_inode_wipe()
1032 mlog(ML_ERROR, "getting nfs sync lock(PR) failed %d\n", status); in ocfs2_delete_inode()
1378 mlog(ML_ERROR, "Checksum failed for dinode %llu\n", in ocfs2_validate_inode_block()
1444 mlog(ML_ERROR, in ocfs2_filecheck_validate_inode_block()
1451 mlog(ML_ERROR, in ocfs2_filecheck_validate_inode_block()
1460 mlog(ML_ERROR, in ocfs2_filecheck_validate_inode_block()
1469 mlog(ML_ERROR, in ocfs2_filecheck_validate_inode_block()
[all …]
quota_local.c
178 mlog(ML_ERROR, "failed to read quota file header (type=%d)\n", in ocfs2_local_check_quota_file()
184 mlog(ML_ERROR, "quota file magic does not match (%u != %u)," in ocfs2_local_check_quota_file()
190 mlog(ML_ERROR, "quota file version does not match (%u != %u)," in ocfs2_local_check_quota_file()
202 mlog(ML_ERROR, "cannot get global quota file inode " in ocfs2_local_check_quota_file()
210 mlog(ML_ERROR, "failed to read global quota file header " in ocfs2_local_check_quota_file()
216 mlog(ML_ERROR, "global quota file magic does not match " in ocfs2_local_check_quota_file()
222 mlog(ML_ERROR, "global quota file version does not match " in ocfs2_local_check_quota_file()
430 mlog(ML_ERROR, "failed to read quota file info header " in ocfs2_begin_quota_recovery()
503 mlog(ML_ERROR, "Failed to get quota structure " in ocfs2_recover_local_quota_file()
629 mlog(ML_ERROR, "failed to read quota file info header " in ocfs2_finish_quota_recovery()
[all …]
buffer_head_io.c
127 mlog(ML_ERROR, in ocfs2_read_blocks_sync()
137 mlog(ML_ERROR, in ocfs2_read_blocks_sync()
217 mlog(ML_ERROR, "asked to read %d blocks!\n", nr); in ocfs2_read_blocks()
306 mlog(ML_ERROR, "block %llu had the JBD bit set " in ocfs2_read_blocks()
move_extents.c
185 mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n", in ocfs2_lock_meta_allocator_move_extents()
303 mlog(0, "len_claimed: %u, len: %u\n", new_len, *len); in ocfs2_defrag_extent()
312 mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos, in ocfs2_defrag_extent()
452 mlog(0, "find the victim group: #%llu, " in ocfs2_find_victim_alloc_group()
523 mlog(0, "extents get ready to be moved to #%llu block\n", in ocfs2_validate_and_adjust_move_goal()
568 mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos); in ocfs2_probe_alloc_group()
636 mlog(ML_ERROR, "unable to get global_bitmap inode\n"); in ocfs2_move_extent()
819 mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, " in __ocfs2_move_extents_range()
862 mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, " in __ocfs2_move_extents_range()
