Code Examples
Here are some code examples to help you get off to a quick start with this Python library. The library was designed to lower the technical barrier to entry for complex tasks and put that power in the hands of any user, technical or not. As the examples show, a great deal can be achieved simply by passing a few parameters to the function of your choice; little to no coding is required to get value from this approach.
%pip install semantic-link-labs
import sempy_labs as labs
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
labs.run_model_bpa(dataset=dataset, workspace=workspace)
labs.run_model_bpa(dataset=dataset, workspace=workspace, extended=True) # Setting extended=True will fetch Vertipaq Analyzer statistics and use them to run advanced BPA rules against your model
labs.run_model_bpa(dataset=dataset, workspace=workspace, export=True) # Setting export=True will export the results to a delta table in the lakehouse attached to the notebook
labs.run_model_bpa(dataset=dataset, workspace=workspace, language="French") # Setting the 'language' parameter will dynamically translate the rules, categories and descriptions to the specified language.
import sempy_labs as labs
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
x = labs.vertipaq_analyzer(dataset=dataset, workspace=workspace)
x = labs.vertipaq_analyzer(dataset=dataset, workspace=workspace, export='table') # Setting export='table' will export the results to delta tables in the lakehouse attached to the notebook
x = labs.vertipaq_analyzer(dataset=dataset, workspace=workspace, export='zip') # Setting export='zip' will export the results to a .zip file in the lakehouse attached to the notebook.
# Note that this function returns a dictionary of dataframes, captured above in the variable 'x'. You can view them as follows:
for name, df in x.items():
    print(name)
    display(df)
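Because the result is a plain Python dictionary, you can also pull out an individual dataframe by key. A small sketch; the key name below is illustrative, so inspect the names printed above for the actual keys:
tables_df = x.get('Tables')  # 'Tables' is an illustrative key name
if tables_df is not None:
    display(tables_df)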
import sempy_labs as labs
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
lakehouse = None # Enter the name or ID of the lakehouse in which the .vpax file will be saved
lakehouse_workspace = None # Enter the name or ID of the workspace in which the lakehouse resides.
labs.create_vpax(
    dataset=dataset,
    workspace=workspace,
    read_stats_from_data=True,
    lakehouse=lakehouse,
    lakehouse_workspace=lakehouse_workspace,
    overwrite=True,
    direct_lake_stats_mode='Resident'
)
import sempy_labs as labs
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
languages = ['French', 'German', 'Chinese']
labs.translate_semantic_model(dataset=dataset, workspace=workspace, languages=languages)
import sempy_labs as labs
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
# Note that this function defaults to a refresh type of 'full'. You can change this using the 'refresh_type' parameter.
labs.refresh_semantic_model(dataset=dataset, workspace=workspace) # Refresh the entire semantic model
labs.refresh_semantic_model(dataset=dataset, workspace=workspace, tables = ['Sales', 'Geography']) # Refresh just specific tables
labs.refresh_semantic_model(dataset=dataset, workspace=workspace, tables = ['Geography', 'Calendar'], partitions = ["'Sales'[SalesFY2024]", "'Sales'[SalesFY2025]"]) # Refresh specific tables and specific partitions
df = labs.refresh_semantic_model(dataset=dataset, workspace=workspace, visualize=True) # See a visual representation of your refresh in real time.
import sempy_labs as labs
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
backup_name = '' # Enter the name or path of the backup file
# Optional
key_vault_uri= '' # Enter the Key Vault URI which contains the secret for the password for the backup
key_vault_password='' # Enter the secret name which contains the password for the backup
backup_password = notebookutils.credentials.getSecret(key_vault_uri,key_vault_password)
# Example 1: No password
labs.backup_semantic_model(
    dataset=dataset,
    file_path=f'{backup_name}.abf',
    allow_overwrite=True, # If True, overwrites backup files of the same name. If False, the file you are saving cannot have the same name as a file that already exists in the same location.
    apply_compression=True, # If True, compresses the backup file. Compressed backup files save disk space, but require slightly higher CPU utilization.
    workspace=workspace,
)
# Example 2: With password
labs.backup_semantic_model(
    dataset=dataset,
    file_path=f'{backup_name}.abf',
    password=backup_password,
    allow_overwrite=True, # If True, overwrites backup files of the same name. If False, the file you are saving cannot have the same name as a file that already exists in the same location.
    apply_compression=True, # If True, compresses the backup file. Compressed backup files save disk space, but require slightly higher CPU utilization.
    workspace=workspace,
)
import sempy_labs as labs
dataset_name = '' # Enter the name of the semantic model to be restored
workspace = None # Enter the name or ID of the workspace in which the semantic model will be restored
backup_name = '' # Enter the name or path of the backup file
# Optional
key_vault_uri= '' # Enter the Key Vault URI which contains the secret for the password for the backup
key_vault_password='' # Enter the secret name which contains the password for the backup
backup_password = notebookutils.credentials.getSecret(key_vault_uri,key_vault_password)
# Example 1: No password
labs.restore_semantic_model(
    dataset=dataset_name,
    file_path=f"{backup_name}.abf",
    allow_overwrite=True, # If True, overwrites backup files of the same name. If False, the file you are saving cannot have the same name as a file that already exists in the same location.
    ignore_incompatibilities=True, # If True, ignores incompatibilities between Azure Analysis Services and Power BI Premium.
    workspace=workspace,
    force_restore=True, # If True, restores the semantic model with the existing semantic model unloaded and offline.
)
# Example 2: With password
labs.restore_semantic_model(
    dataset=dataset_name,
    file_path=f"{backup_name}.abf",
    password=backup_password,
    allow_overwrite=True, # If True, overwrites backup files of the same name. If False, the file you are saving cannot have the same name as a file that already exists in the same location.
    ignore_incompatibilities=True, # If True, ignores incompatibilities between Azure Analysis Services and Power BI Premium.
    workspace=workspace,
    force_restore=True, # If True, restores the semantic model with the existing semantic model unloaded and offline.
)
import sempy_labs as labs
source_dataset = '' # Enter the name of the source semantic model
source_workspace = None # Enter the name or ID of the workspace in which the source semantic model resides
target_dataset = '' # Enter the name of the target semantic model
target_workspace = None # Enter the name or ID of the workspace in which the target semantic model will reside
refresh_target_dataset = False # If set to True, refreshes the deployed semantic model
overwrite = False # If set to True, overwrites the existing target semantic model if it already exists
# Example 1 (standard)
labs.deploy_semantic_model(source_dataset=source_dataset, source_workspace=source_workspace, target_dataset=target_dataset, target_workspace=target_workspace, refresh_target_dataset=refresh_target_dataset, overwrite=overwrite)
# Example 2 (advanced)
perspective = None # To deploy a 'partial' model akin to the [master model](https://www.elegantbi.com/post/mastermodel) technique, enter the name of a perspective in your model. Only the objects within the perspective will be deployed to the target destination (including object dependencies).
labs.deploy_semantic_model(source_dataset=source_dataset, source_workspace=source_workspace, target_dataset=target_dataset, target_workspace=target_workspace, refresh_target_dataset=refresh_target_dataset, overwrite=overwrite, perspective=perspective)
import sempy_labs as labs
import sempy.fabric as fabric
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
# Example 1: For a single semantic model
model_size = labs.get_semantic_model_size(dataset=dataset, workspace=workspace)
print(model_size)
# Example 2: For all semantic models within a workspace
model_sizes = {}
dfD = fabric.list_datasets(workspace=workspace, mode="rest")
for _, r in dfD.iterrows():
    d_name = r["Dataset Name"]
    d_id = r["Dataset Id"]
    if not labs.is_default_semantic_model(dataset=d_id, workspace=workspace):
        model_size = labs.get_semantic_model_size(dataset=d_id, workspace=workspace)
        model_sizes[d_name] = model_size
print(model_sizes)
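As a follow-up, the size dictionary can be turned into a sorted table for easier comparison. This is a minimal sketch using plain pandas; the column names are illustrative and it assumes get_semantic_model_size returns a numeric size.
import pandas as pd
df_sizes = (
    pd.DataFrame(list(model_sizes.items()), columns=['Dataset Name', 'Model Size'])
    .sort_values(by='Model Size', ascending=False)
    .reset_index(drop=True)
)
display(df_sizes)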
from sempy_labs.tom import connect_semantic_model
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
readonly = True # Set this to False to overwrite existing descriptions
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=readonly) as tom:
    x = tom.generate_measure_descriptions()
x
import sempy_labs as labs
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
dax_string = """ EVALUATE SUMMARIZECOLUMNS('Product'[Color], "Sales", [Sales Amount]) """
dax_string2 = """ EVALUATE SUMMARIZECOLUMNS('Date'[Fiscal Month], "Discounts", [Discount Amount]) """
# Example 1: single DAX query, do not show vertipaq stats
labs.get_dax_query_dependencies(dataset=dataset, workspace=workspace, dax_string=dax_string, show_vertipaq_stats=False)
# Example 2: single DAX query, show vertipaq stats
labs.get_dax_query_dependencies(dataset=dataset, workspace=workspace, dax_string=dax_string, show_vertipaq_stats=True)
# Example 3: multiple DAX queries, show vertipaq stats
labs.get_dax_query_dependencies(dataset=dataset, workspace=workspace, dax_string=[dax_string, dax_string2], show_vertipaq_stats=True)
import sempy_labs as labs
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
labs.list_synonyms(dataset=dataset, workspace=workspace)
This function prints out a URL that opens DAX query view connected to the specified semantic model, pre-populated with the specified query.
import sempy_labs as labs
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
dax_string = """EVALUATE SUMMARIZECOLUMNS('Product'[Color], "Sales", [Sales Amount])"""
labs.generate_dax_query_view_url(dataset=dataset, workspace=workspace, dax_string=dax_string)
from sempy_labs import migration
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
migration.migrate_direct_lake_to_import(dataset=dataset, workspace=workspace)
import sempy_labs as labs
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
labs.is_default_semantic_model(dataset=dataset, workspace=workspace)
from sempy_labs.tom import connect_semantic_model
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=True) as tom:
    for t in tom.model.Tables:
        for c in t.Columns:
            print(f"'{t.Name}'[{c.Name}]") # Print the names of all tables/columns in the model
from sempy_labs.tom import connect_semantic_model
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
# Option 1: Note that this will make XMLA Read/Write changes to the model (saving annotations)
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=False) as tom:
    tom.set_vertipaq_annotations()
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=True) as tom:
    for t in tom.model.Tables:
        print(f"{t.Name} : {tom.total_size(object=t)}") # Shows the total size (in bytes) of each table in your semantic model
# Option 2: Note that this will **NOT** make XMLA Read/Write changes to the model (saving annotations)
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=True) as tom:
    tom.set_vertipaq_annotations()
    for t in tom.model.Tables:
        print(f"{t.Name} : {tom.total_size(object=t)}") # Shows the total size (in bytes) of each table in your semantic model
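To rank tables by size, the pattern from Option 2 can be extended with a bit of plain Python. A minimal sketch that reuses set_vertipaq_annotations and total_size as above and assumes total_size returns an integer number of bytes:
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=True) as tom:
    tom.set_vertipaq_annotations()
    table_sizes = {t.Name: tom.total_size(object=t) for t in tom.model.Tables}
for name, size in sorted(table_sizes.items(), key=lambda kv: kv[1], reverse=True):
    print(f"{name}: {size:,} bytes")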
from sempy_labs.tom import connect_semantic_model
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
table_name = '' # Enter the name of the table with an incremental refresh policy
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=True) as tom:
    tom.show_incremental_refresh_policy(table_name=table_name)
from sempy_labs.tom import connect_semantic_model
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=False) as tom:
    # Example 1: For a table
    t = tom.model.Tables['Sales']
    tom.set_translation(object=t, language='fr-FR', property="Name", value='Ventes')
    #tom.set_translation(object=t, language='fr-FR', property="Description", value="C'est le tableau des ventes")
    # Example 2: For a measure
    m = tom.model.Tables['Sales'].Measures['Sales Amount']
    tom.set_translation(object=m, language='fr-FR', property="Name", value='Montant des ventes')
    #tom.set_translation(object=m, language='fr-FR', property="Description", value='La somme des ventes')
    #tom.set_translation(object=m, language='fr-FR', property="Display Folder", value='...')
    # Example 3: For a column
    c = tom.model.Tables['Sales'].Columns['SalesAmount']
    tom.set_translation(object=c, language='fr-FR', property="Name", value='Montant des ventes')
    #tom.set_translation(object=c, language='fr-FR', property="Description", value='...')
    #tom.set_translation(object=c, language='fr-FR', property="Display Folder", value='...')
Create a Field Parameter
from sempy_labs.tom import connect_semantic_model
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
table_name = '' # Enter the name of the Field Parameter to be created
with connect_semantic_model(dataset=dataset, readonly=False, workspace=workspace) as tom:
    tom.add_field_parameter(table_name=table_name, objects=["'Product'[Color]", "[Sales Amount]", "'Geography'[Country]"])
from sempy_labs.tom import connect_semantic_model
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=False) as tom:
    # Example 1: For a specific column
    tom.model.Tables['TableName'].Columns['ColumnName'].IsAvailableInMDX = False
    # Example 2: For all relevant columns (according to the [Model BPA rule](https://github.com/microsoft/semantic-link-labs/blob/b1626d1dc0d041f3b19d1a1c9b38c0039f718382/src/sempy_labs/_model_bpa_rules.py#L110))
    if not tom.is_direct_lake():
        for c in tom.all_columns():
            if c.IsAvailableInMDX and (c.IsHidden or c.Parent.IsHidden) and c.SortByColumn is None and not any(tom.used_in_sort_by(column=c)) and not any(tom.used_in_hierarchies(column=c)):
                c.IsAvailableInMDX = False
from sempy_labs.tom import connect_semantic_model
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
parameter_name = '' # Enter the parameter name
m_expression = '' # Enter the m expression here
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=False) as tom:
    tom.model.Expressions[parameter_name].Expression = m_expression
from sempy_labs.tom import connect_semantic_model
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
role_name = '' # Enter the name of the role to add to the model
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=False) as tom:
    # Add the role
    tom.add_role(role_name=role_name, model_permission='Reader')
    # Add members to the role
    tom.add_role_member(role_name=role_name, member=['abc@mail.com', 'def@mail.com', 'ghi@mail.com'])
    # Add row level security to the role
    tom.set_rls(role_name=role_name, table_name='Product', filter_expression="'Product'[Class]='M'")
    tom.set_rls(role_name=role_name, table_name='Geography', filter_expression="'Geography'[Country]='United States'")
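To double-check the result without making further changes, you can reconnect in read-only mode and walk the role metadata. This is a minimal sketch relying on standard Tabular Object Model properties (Roles, ModelPermission, Members/MemberName, TablePermissions/FilterExpression); these are assumptions about the underlying TOM API rather than sempy_labs-specific helpers.
with connect_semantic_model(dataset=dataset, workspace=workspace, readonly=True) as tom:
    for r in tom.model.Roles:
        print(f"Role: {r.Name} ({r.ModelPermission})")
        for mem in r.Members:
            print(f"  Member: {mem.MemberName}")
        for tp in r.TablePermissions:
            print(f"  RLS on '{tp.Table.Name}': {tp.FilterExpression}")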
from sempy_labs import directlake
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
source = 'MyLakehouse' # The name or ID of the lakehouse/warehouse
source_type = "Lakehouse" # Can either be 'Lakehouse' or 'Warehouse'
source_workspace = 'MyLakehouseWorkspace' # Enter the name or ID of the workspace in which the lakehouse/warehouse exists
directlake.update_direct_lake_model_connection(dataset=dataset, workspace=workspace, source=source, source_type=source_type, source_workspace=source_workspace)
from sempy_labs import directlake
dataset = '' # Enter the name or ID of your semantic model
table_name = '' # The name of the table in the semantic model
entity_name = '' # The name of the delta table in the lakehouse
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
schema = None # The schema of the table in the semantic model (optional)
directlake.update_direct_lake_partition_entity(dataset=dataset, table_name=table_name, entity_name=entity_name, schema=schema, workspace=workspace)
from sempy_labs import directlake
dataset = '' # Enter the name or ID of your semantic model
table_name = ['Sales', 'Geography'] # A list of table names in the semantic model
entity_name = ['Fact_Sales', 'Dim_Geography'] # A list of delta table names in the source (i.e. lakehouse/warehouse)
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
schema = None # The schema of the table in the semantic model (optional)
directlake.update_direct_lake_partition_entity(dataset=dataset, table_name=table_name, entity_name=entity_name, schema=schema, workspace=workspace)
from sempy_labs import directlake
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
# Method 1: Reframes the semantic model and puts the same columns back into memory which resided in memory before the reframe
directlake.warm_direct_lake_cache_isresident(dataset=dataset, workspace=workspace)
# Method 2: Puts the columns listed in the perspective into memory in the semantic model
perspective = '' # Enter the name of the perspective in the semantic model which contains the columns you would like to add into memory
add_dependencies = True
directlake.warm_direct_lake_cache_perspective(dataset=dataset, workspace=workspace, perspective=perspective, add_dependencies=add_dependencies)
from sempy_labs import admin
data_source_details = False
dataset_schema = False
dataset_expressions = False
lineage = False
artifact_users = False
# Example 1 (current workspace)
x = admin.scan_workspaces(workspace=None, data_source_details=data_source_details, dataset_schema=dataset_schema, dataset_expressions=dataset_expressions, lineage=lineage, artifact_users=artifact_users) # Scans the current workspace
x
# Example 2 (one specified workspace)
x = admin.scan_workspaces(workspace="Workspace1", data_source_details=data_source_details, dataset_schema=dataset_schema, dataset_expressions=dataset_expressions, lineage=lineage, artifact_users=artifact_users) # Scans workspace "Workspace1"
x
# Example 3 (list of workspaces)
x = admin.scan_workspaces(workspace=["Workspace1", "Workspace2"], data_source_details=data_source_details, dataset_schema=dataset_schema, dataset_expressions=dataset_expressions, lineage=lineage, artifact_users=artifact_users) # Scans workspace "Workspace1" and "Workspace2"
x
from sempy_labs import admin
start_time = "2025-02-15T07:55:00"
end_time = "2025-02-15T08:55:00"
activity_filter = "viewreport"
admin.list_activity_events(start_time=start_time, end_time=end_time, activity_filter=activity_filter)
from sempy_labs import admin
admin.list_tenant_settings()
import sempy_labs as labs
labs.list_connections()
import sempy_labs as labs
import sempy_labs.lakehouse as lake
lakehouse = None # Enter the name or ID of the lakehouse
workspace = None # Enter the name or ID of the workspace in which the lakehouse exists
# Example 1
df = lake.get_lakehouse_tables(lakehouse=lakehouse, workspace=workspace)
# Example 2
df = lake.get_lakehouse_tables(lakehouse=lakehouse, workspace=workspace, extended=True) # Setting extended=True will show additional stats about tables as well as whether they are at risk of falling back to DirectQuery if used in Direct Lake mode
# Example 3: use this for schema-enabled lakehouses (the API behind get_lakehouse_tables does not yet work with schema-enabled lakehouses)
with labs.ConnectLakehouse(lakehouse=lakehouse, workspace=workspace) as sql:
    df = sql.query(""" SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE [TABLE_TYPE] = 'BASE TABLE' """)
    display(df)
import sempy_labs.lakehouse as lake
lakehouse = None # Enter the name or ID of the lakehouse
workspace = None # Enter the name or ID of the workspace in which the lakehouse exists
df = lake.get_lakehouse_columns(lakehouse=lakehouse, workspace=workspace)
df
import sempy_labs.lakehouse as lake
lakehouse = None # Enter the name or ID of the lakehouse
workspace = None # Enter the name or ID of the workspace in which the lakehouse exists
df = lake.list_shortcuts(lakehouse=lakehouse, workspace=workspace)
df
import sempy_labs.lakehouse as lake
table_name = 'MyTable' # Enter the name of the table on which the shortcut will be based
source_lakehouse = 'MyLakehouse1' # Enter the name of the lakehouse in which the table exists
source_workspace = 'MyLakehouse1Workspace' # Enter the name of the workspace in which the source lakehouse exists
destination_lakehouse = 'MyLakehouse2' # Enter the name of the lakehouse in which the shortcut will be created
destination_workspace = 'MyLakehouse2Workspace' # Enter the name of the workspace in which the destination lakehouse exists
shortcut_name = None # Enter the name of the shortcut which will be created. By default it is named after the table_name
lake.create_shortcut_onelake(table_name=table_name, source_lakehouse=source_lakehouse, source_workspace=source_workspace, destination_lakehouse=destination_lakehouse, destination_workspace=destination_workspace, shortcut_name=shortcut_name)
import sempy_labs.lakehouse as lake
lakehouse = None # Enter the name or ID of the lakehouse
workspace = None # Enter the name or ID of the workspace in which the lakehouse exists
# Example 1: Recover a delta table
lake.recover_lakehouse_object(file_path='Tables/FactSales', lakehouse=lakehouse, workspace=workspace)
# Example 2: Recover a delta table (with schema)
lake.recover_lakehouse_object(file_path='Tables/sales/FactSales', lakehouse=lakehouse, workspace=workspace)
# Example 3: Recover a file
lake.recover_lakehouse_object(file_path='Files/Themes/ReportTheme.json', lakehouse=lakehouse, workspace=workspace)
# Example 4: Recover a folder
lake.recover_lakehouse_object(file_path='Files/Themes', lakehouse=lakehouse, workspace=workspace)
import sempy_labs.lakehouse as lake
lakehouse = None # Enter the name or ID of the lakehouse
workspace = None # Enter the name or ID of the workspace in which the lakehouse exists
# Example 1: Optimize a single table
lake.optimize_lakehouse_tables(tables='MyTable', lakehouse=lakehouse, workspace=workspace)
# Example 2: Optimize several tables
lake.optimize_lakehouse_tables(tables=['MyTable', 'MySecondTable'], lakehouse=lakehouse, workspace=workspace)
# Example 3: Optimize all delta tables within the lakehouse
lake.optimize_lakehouse_tables(tables=None, lakehouse=lakehouse, workspace=workspace)
import sempy_labs.lakehouse as lake
lakehouse = None # Enter the name or ID of the lakehouse
workspace = None # Enter the name or ID of the workspace in which the lakehouse exists
retain_n_hours = None # The number of hours to retain historical versions of Delta table files
# Example 1: Vacuum a single table
lake.vacuum_lakehouse_tables(tables='MyTable', lakehouse=lakehouse, workspace=workspace, retain_n_hours=retain_n_hours)
# Example 2: Vacuum several tables
lake.vacuum_lakehouse_tables(tables=['MyTable', 'MySecondTable'], lakehouse=lakehouse, workspace=workspace, retain_n_hours=retain_n_hours)
# Example 3: Vacuum all delta tables within the lakehouse
lake.vacuum_lakehouse_tables(tables=None, lakehouse=lakehouse, workspace=workspace, retain_n_hours=retain_n_hours)
import sempy_labs.report as rep
report = '' # Name or ID of the report
dataset = '' # Name or ID of the semantic model to bind to the report
report_workspace = None # Name or ID of the workspace in which the report resides
dataset_workspace = None # Name or ID of the workspace in which the semantic model resides
rep.report_rebind(report=report, dataset=dataset, report_workspace=report_workspace, dataset_workspace=dataset_workspace)
Note
This function requires the report to be in the PBIR format.
import sempy_labs.report as rep
report = '' # Name or ID of the report
workspace = None # Name or ID of the workspace in which the report resides
rep.run_report_bpa(report=report, workspace=workspace)
rep.run_report_bpa(report=report, workspace=workspace, export=True) # Exports the results to a delta table in the lakehouse attached to the notebook
Note
These functions require the report to be in the PBIR format.
from sempy_labs.report import ReportWrapper
report = '' # Name or ID of the report
workspace = None # Name or ID of the workspace in which the report resides
rpt = ReportWrapper(report=report, workspace=workspace)
rpt.list_pages()
#rpt.list_visuals()
#rpt.list_custom_visuals()
#rpt.list_report_filters()
#rpt.list_page_filters()
#rpt.list_visual_filters()
#rpt.list_visual_objects()
#rpt.list_bookmarks()
#rpt.list_report_level_measures()
#rpt.list_semantic_model_objects()
Note
This function requires the report to be in the PBIR format.
from sempy_labs.report import ReportWrapper
report = '' # Name or ID of the report
workspace = None # Name or ID of the workspace in which the report resides
theme_type = None # Set this to 'customTheme' to get the custom theme. None defaults to the base theme
rpt = ReportWrapper(report=report, workspace=workspace)
theme = rpt.get_theme(theme_type=theme_type)
theme
Note
This function requires the report to be in the PBIR format.
from sempy_labs.report import ReportWrapper
report = '' # Name or ID of the report
workspace = None # Name or ID of the workspace in which the report resides
theme_file_path = "/lakehouse/default/Files/CY23SU09.json" # Enter the file path of the theme file
rpt = ReportWrapper(report=report, workspace=workspace)
rpt.set_theme(theme_file_path=theme_file_path)
import sempy_labs.report as rep
import sempy.fabric as fabric
source_report = '' # Name of the source report
target_report = '' # Name of the target report
source_workspace = None # Name of the workspace in which the source report resides
target_workspace = None # Name of the workspace in which the target report will reside
dataset = '' # Name of the semantic model the target report will use
dataset_workspace = None # Name of the workspace in which the semantic model resides
report_json = rep.get_report_json(report=source_report, workspace=source_workspace)
dfR = fabric.list_reports(workspace=target_workspace)
dfR_filt = dfR[dfR['Name'] == target_report]
if dfR_filt.empty:
    # Create the report if it doesn't exist
    rep.create_report_from_reportjson(report=target_report, dataset=dataset, report_json=report_json, workspace=target_workspace)
    rep.report_rebind(report=target_report, dataset=dataset, report_workspace=target_workspace, dataset_workspace=dataset_workspace)
else:
    # Update the report definition if the report already exists
    rep.update_report_from_reportjson(report=target_report, report_json=report_json, workspace=target_workspace)
Note
This function requires the report to be in the PBIR format.
import sempy_labs as labs
from sempy_labs.report import ReportWrapper
# Example 1: Show for a single report
report = '' # The name or ID of the report
workspace = None # The name or ID of the workspace in which the report exists
rpt = ReportWrapper(report=report, workspace=workspace)
df = rpt.list_semantic_model_objects(extended=True)
df[df['Valid Semantic Model Object'] == False]
# Example 2: Show for all downstream reports of a given semantic model (within the same workspace as the semantic model)
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
df = labs.list_report_semantic_model_objects(dataset=dataset, workspace=workspace, extended=True)
df[df['Valid Semantic Model Object'] == False]
Note
This function requires the reports to be in the PBIR format.
import sempy_labs as labs
from sempy_labs.report import ReportWrapper
dataset = '' # Enter the name or ID of your semantic model
workspace = None # Enter the name or ID of the workspace in which the semantic model resides
# Note: This currently only checks the reports which feed from the above semantic model and reside in the same workspace as the semantic model
df = labs.list_semantic_model_object_report_usage(dataset=dataset, workspace=workspace, include_dependencies=True, extended=True)
df
import sempy_labs.report as rep
report = '' # Name or ID of the report
workspace = None # Name or ID of the workspace in which the report resides
thick_report = True # If set to True, saves the report and underlying semantic model. If set to False, saves just the report.
live_connect = True # If set to True, saves a .pbip live-connected to the workspace in the Power BI / Fabric service. If set to False, saves a .pbip with a local model, independent from the Power BI / Fabric service.
lakehouse = None # Enter the name or ID of the lakehouse where you want to save the .pbip file
lakehouse_workspace = None # Enter the name or ID of the workspace in which the lakehouse exists
rep.save_report_as_pbip(report=report, workspace=workspace, thick_report=thick_report, live_connect=live_connect, lakehouse=lakehouse, lakehouse_workspace=lakehouse_workspace)
Note
This function requires the report to be in the PBIR format.
from sempy_labs.report import ReportWrapper
report = '' # Name or ID of the report
workspace = None # Name or ID of the workspace in which the report resides
rpt = ReportWrapper(report=report, workspace=workspace)
rpt.remove_unnecessary_custom_visuals()
Note
This function requires the report to be in the PBIR format.
from sempy_labs.report import ReportWrapper
report = '' # Name or ID of the report
workspace = None # Name or ID of the workspace in which the report resides
rpt = ReportWrapper(report=report, workspace=workspace)
# Example 1: migrate all report-level measures
rpt.migrate_report_level_measures()
# Example 2: migrate a single report-level measure
rpt.migrate_report_level_measures(measures='Sales')
# Example 3: migrate several report-level measures
rpt.migrate_report_level_measures(measures=['Sales', 'Profit'])
Note
This function requires the report to be in the PBIR format.
from sempy_labs.report import ReportWrapper
report = '' # Name or ID of the report
workspace = None # Name or ID of the workspace in which the report resides
rpt = ReportWrapper(report=report, workspace=workspace)
rpt.set_active_page(page_name='MainPage')
Note
This function requires the report to be in the PBIR format.
from sempy_labs.report import ReportWrapper
report = '' # Name or ID of the report
workspace = None # Name or ID of the workspace in which the report resides
rpt = ReportWrapper(report=report, workspace=workspace)
rpt.set_page_visibility(page_name='TooltipPage', hidden=True)
Note
This function requires workspace monitoring to be enabled.
import sempy_labs as labs
workspace = None # Name or ID of the workspace
kql_query = """ SemanticModelLogs
| where OperationName == "QueryEnd"
| extend ctx = parse_json(dynamic_to_json(ApplicationContext))
| extend DatasetId = tostring(ctx.DatasetId)
| extend ReportId = tostring(ctx.Sources[0].ReportId)
| extend VisualId = tostring(ctx.Sources[0].VisualId)
| project WorkspaceName, ItemName, DurationMs, CpuTimeMs, Status, EventText, ApplicationContext, DatasetId, ReportId, VisualId """
sql_query = """ SELECT TOP 10 WorkspaceName, ItemName, DurationMs, CpuTimeMs, Status, EventText, ApplicationContext FROM SemanticModelLogs WHERE OperationName = 'QueryEnd' """
# Example 1: Using KQL
labs.query_workspace_monitoring(query=kql_query, workspace=workspace, language="kql")
# Example 2: Using SQL
labs.query_workspace_monitoring(query=sql_query, workspace=workspace, language="sql")
import sempy_labs as labs
dataset = '' # Name of the semantic model
workspace = None # Name or ID of the workspace
topn = 20 # This will find the top X slowest queries
kql_query = f""" SemanticModelLogs
| where OperationName == "QueryEnd" and (EventText startswith "EVALUATE" or EventText startswith "DEFINE")
| where ItemName == "{dataset}"
| where Timestamp >= ago(1d) // 1d represents 1 day. 1h represents 1 hour. See [here](https://learn.microsoft.com/kusto/query/scalar-data-types/timespan?view=microsoft-fabric) for more timespan literal examples.
| extend ctx = parse_json(dynamic_to_json(ApplicationContext))
| extend DatasetId = tostring(ctx.DatasetId)
| extend ReportId = tostring(ctx.Sources[0].ReportId)
| project ItemName, DatasetId, ReportId, Timestamp, DurationMs, CpuTimeMs, EventText, ExecutingUser
| top {topn} by DurationMs desc
"""
df = labs.query_workspace_monitoring(query=kql_query, workspace=workspace, language="kql")
display(df)
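If you want to build up a history of slow queries over time, the returned dataframe can be appended to a delta table with save_as_delta_table (covered further down this page). A minimal sketch, assuming a lakehouse is attached to the notebook; the delta table name is illustrative.
labs.save_as_delta_table(dataframe=df, delta_table_name='slowest_queries_log', write_mode='append', lakehouse=None, workspace=None)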
import sempy_labs as labs
workspace = None # Name or ID of the workspace
kql_query = """ SemanticModelLogs
| where OperationName == "QueryEnd" and (EventText startswith "EVALUATE" or EventText startswith "DEFINE")
| where Timestamp >= ago(1d) // 1d represents 1 day. 1h represents 1 hour. See [here](https://learn.microsoft.com/kusto/query/scalar-data-types/timespan?view=microsoft-fabric) for more timespan literal examples.
| extend ctx = parse_json(dynamic_to_json(ApplicationContext))
| extend DatasetId = tostring(ctx.DatasetId)
| distinct EventText, DatasetId
"""
df = labs.query_workspace_monitoring(query=kql_query, workspace=workspace, language="kql")
display(df)
"""
import sempy_labs as labs
lakehouse = None # Enter the name or ID of the lakehouse
workspace = None # Enter the name or ID of the workspace in which the lakehouse exists
df = labs.list_connections() # This is simply an example; save_as_delta_table works regardless of whether the dataframe submitted is a pandas or Spark dataframe
labs.save_as_delta_table(dataframe=df, delta_table_name='list_connections', write_mode='overwrite', lakehouse=lakehouse, workspace=workspace) # Overwrite existing delta table
labs.save_as_delta_table(dataframe=df, delta_table_name='list_connections', write_mode='append', lakehouse=lakehouse, workspace=workspace) # Append to existing delta table
labs.save_as_delta_table(dataframe=df, delta_table_name='list_connections', write_mode='append', lakehouse=lakehouse, workspace=workspace, merge_schema=True) # Merges the schema between the existing delta table and the new data being added to account for differences in columns
import sempy_labs as labs
from sempy_labs import graph
key_vault_uri = '' # Enter your key vault URI
key_vault_tenant_id = '' # Enter the key vault key to the secret storing your Tenant ID
key_vault_client_id = '' # Enter the key vault key to the secret storing your Client ID (Application ID)
key_vault_client_secret = '' # Enter the key vault key to the secret storing your Client Secret
user = '' # Enter your email address (the sender's email address)
subject = '' # Enter the subject of the email
content_type = "Text"
to_recipients = [] # Enter the email address(es) of the 'to' recipients
cc_recipients = None # Enter the email address(es) of the 'cc' recipients
content = '' # Enter the body of the email
with labs.service_principal_authentication(
        key_vault_uri=key_vault_uri,
        key_vault_tenant_id=key_vault_tenant_id,
        key_vault_client_id=key_vault_client_id,
        key_vault_client_secret=key_vault_client_secret):
    graph.send_mail(user=user, subject=subject, content_type=content_type, to_recipients=to_recipients, cc_recipients=cc_recipients, content=content)